from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Any = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[Any] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_A : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
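# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). With the lazy setup
# above, `import transformers` stays cheap: each framework-specific submodule
# is only imported when one of its exported names is first accessed. Assumes
# the `transformers` package (and `torch` for the modeling class) is installed.
def _xglm_lazy_import_demo():
    import transformers

    config = transformers.XGLMConfig()        # triggers import of configuration_xglm
    model_cls = transformers.XGLMForCausalLM  # triggers import of modeling_xglm
    return config, model_cls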
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_4bit_bnb_available,
is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_8bit
A : int = bnb_quantization_config.load_in_4bit
if load_in_8bit and not is_8bit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_4bit and not is_4bit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_4bit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fp32_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fp32_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_4bit
A : Tuple = load_in_8bit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
param.to(torch.float32 )
if param.dtype != torch.float32:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.float32 )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
""" We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
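# Hedged usage sketch for the loader above. `BnbQuantizationConfig` and
# `load_and_quantize_model` are accelerate's public entry points for this code
# path; the model object and checkpoint directory below are placeholders.
def _load_and_quantize_demo(empty_model, checkpoint_dir):
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    return load_and_quantize_model(
        empty_model,                      # instantiated under init_empty_weights()
        weights_location=checkpoint_dir,  # folder holding the checkpoint shards
        bnb_quantization_config=bnb_config,
        device_map="auto",
    )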
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized. """ """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.float32
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_4bit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
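# Hedged sketch of a custom `device_map` accepted by the helper above: named
# modules can be pinned to CPU/disk while the rest is quantized on GPU 0. The
# module names are illustrative and depend on the concrete architecture.
_EXAMPLE_DEVICE_MAP = {
    "transformer.word_embeddings": 0,
    "transformer.h": 0,
    "transformer.ln_f": "cpu",  # kept in torch_dtype and offloaded to CPU
    "lm_head": "disk",          # offloaded to disk
}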
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_8bit:
A : Optional[Any] = bnb.nn.Linear8bitLt(
module.in_features , module.out_features , module.bias is not None , has_fp16_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_int8_threshold , )
elif bnb_quantization_config.load_in_4bit:
A : Dict = bnb.nn.Linear4bit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
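# Hedged illustration of the helper above (referenced as `get_keys_to_not_convert`
# at its call site earlier in this file): for a causal-LM whose output head is
# tied to the input embeddings, it typically returns the head module name so it
# stays in full precision. The exact name depends on the architecture.
def _skip_modules_demo(model):
    return get_keys_to_not_convert(model)  # e.g. ["lm_head"] for GPT-style models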
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linear4bit ):
return True
return False
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
if fp16_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def a__ ( A__, A__, A__, A__, A__, A__ = None, ):
SCREAMING_SNAKE_CASE_ : int = {}
if train_file is not None:
SCREAMING_SNAKE_CASE_ : Dict = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE_ : str = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE_ : str = [test_file]
SCREAMING_SNAKE_CASE_ : Tuple = datasets.load_dataset('csv', data_files=A__ )
SCREAMING_SNAKE_CASE_ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE_ : List[str] = features_name.pop(A__ )
SCREAMING_SNAKE_CASE_ : int = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE_ : Any = {label: i for i, label in enumerate(A__ )}
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
if len(A__ ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=A__, max_length=A__, padding='max_length' ), batched=A__, )
elif len(A__ ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE_ : Optional[int] = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=A__, max_length=A__, padding='max_length', ), batched=A__, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE_ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE_ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE_ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE_ : Dict = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE_ : Tuple = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE_ : Optional[int] = (
tf.data.Dataset.from_generator(
A__, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE_ : int = (
tf.data.Dataset.from_generator(
A__, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = (
tf.data.Dataset.from_generator(
A__, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase__ : Union[str, Any] =logging.getLogger(__name__)
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase = field(metadata={"""help""": """Which column contains the label"""} )
_UpperCAmelCase = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The path of the training file"""} )
_UpperCAmelCase = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The path of the development file"""} )
_UpperCAmelCase = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The path of the test file"""} )
_UpperCAmelCase = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_UpperCAmelCase = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def a__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
F'''16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=A__, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
SCREAMING_SNAKE_CASE_ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(A__ ), label2id=A__, id2label={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE_ : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=A__, cache_dir=model_args.cache_dir, )
def compute_metrics(A__ ) -> Dict:
SCREAMING_SNAKE_CASE_ : Tuple = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ : int = TFTrainer(
model=A__, args=A__, train_dataset=A__, eval_dataset=A__, compute_metrics=A__, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ : List[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
SCREAMING_SNAKE_CASE_ : Dict = trainer.evaluate()
SCREAMING_SNAKE_CASE_ : int = os.path.join(training_args.output_dir, 'eval_results.txt' )
with open(A__, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
results.update(A__ )
return results
if __name__ == "__main__":
main()
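# Hedged invocation sketch for the script above (the model id and CSV paths are
# illustrative; each CSV needs a label column plus one or two text columns):
#
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 --max_seq_length 128 \
#     --output_dir ./tf_clf --do_train --do_eval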
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
A : Tuple = parser.parse_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
A : Any = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
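# Hedged usage sketch: the parser above backs the `transformers-cli` console
# entry point, so each registered subcommand is reachable from the shell, e.g.:
#
#   transformers-cli env                          # report environment info
#   transformers-cli download bert-base-uncased   # cache a checkpoint locally
#   transformers-cli add-new-model-like           # scaffold a model from a template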
"""simple docstring"""
import math
import os
import sys
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = """"""
try:
with open(SCREAMING_SNAKE_CASE , """rb""" ) as binary_file:
UpperCamelCase : Any = binary_file.read()
for dat in data:
UpperCamelCase : Optional[int] = f"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lexicon.pop(SCREAMING_SNAKE_CASE )
UpperCamelCase : int = last_match_id
if math.log2(SCREAMING_SNAKE_CASE ).is_integer():
for curr_key in lexicon:
UpperCamelCase : List[Any] = """0""" + lexicon[curr_key]
UpperCamelCase : Optional[Any] = bin(SCREAMING_SNAKE_CASE )[2:]
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : int = {"""0""": """0""", """1""": """1"""}
UpperCamelCase , UpperCamelCase : int = """""", """"""
UpperCamelCase : int = len(SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase : int = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
index += 1
UpperCamelCase : str = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
UpperCamelCase : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Any = os.path.getsize(SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = bin(SCREAMING_SNAKE_CASE )[2:]
UpperCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = 8
try:
with open(SCREAMING_SNAKE_CASE , """wb""" ) as opened_file:
UpperCamelCase : Optional[int] = [
to_write[i : i + byte_length]
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(SCREAMING_SNAKE_CASE , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = read_file_binary(SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = compress_data(SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = add_file_length(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
write_file_binary(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
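# Hedged invocation sketch (the script and file names are illustrative): the
# program reads the source file as a bit string, LZW-compresses it, prepends
# the encoded input length, and writes the packed bytes:
#
#   python lzw_compress.py raw_input.bin compressed.lzw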
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:int = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class UpperCAmelCase :
def __init__( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : int=True , __lowerCamelCase : Any=9_9 , __lowerCamelCase : Tuple=3_2 , __lowerCamelCase : str=5 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : List[str]=3_7 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : int=5_1_2 , __lowerCamelCase : Dict=1_6 , __lowerCamelCase : str=2 , __lowerCamelCase : Optional[int]=0.0_2 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Union[str, Any]=None , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = scope
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = ids_tensor([self.batch_size] , self.num_choices )
_snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__lowerCamelCase , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
"""simple docstring"""
_snake_case = FalconModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
_snake_case = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : str , ):
"""simple docstring"""
_snake_case = True
_snake_case = FalconModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )
_snake_case = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , )
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : str , ):
"""simple docstring"""
_snake_case = FalconForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : str , ):
"""simple docstring"""
_snake_case = True
_snake_case = True
_snake_case = FalconForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# first forward pass
_snake_case = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase , )
_snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
_snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
_snake_case = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )['''hidden_states'''][0]
_snake_case = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )['''hidden_states'''][0]
# select random slice
_snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
_snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.prepare_config_and_inputs()
(
_snake_case,
_snake_case,
_snake_case,
_snake_case,
_snake_case,
_snake_case,
_snake_case,
) = config_and_inputs
_snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : Optional[Any] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
A__ : Tuple = (FalconForCausalLM,) if is_torch_available() else ()
A__ : Optional[Any] = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : List[Any] = False
A__ : List[Any] = False
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case = FalconModelTester(self )
_snake_case = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case , *_snake_case = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_snake_case = alibi
self.model_tester.create_and_check_model(__lowerCamelCase , *__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = input_dict['''input_ids''']
_snake_case = input_ids.ne(1 ).to(__lowerCamelCase )
_snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case = FalconForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = '''single_label_classification'''
_snake_case = input_dict['''input_ids''']
_snake_case = input_ids.ne(1 ).to(__lowerCamelCase )
_snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case = FalconForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = input_dict['''input_ids''']
_snake_case = FalconForCausalLM(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , use_cache=__lowerCamelCase )
_snake_case = input_ids.shape[0]
_snake_case = model._convert_to_rw_cache(result.past_key_values )
_snake_case = model._convert_cache_to_standard_format(__lowerCamelCase , __lowerCamelCase )
for layer in range(len(__lowerCamelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = '''multi_label_classification'''
_snake_case = input_dict['''input_ids''']
_snake_case = input_ids.ne(1 ).to(__lowerCamelCase )
_snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_snake_case = FalconForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__lowerCamelCase , '''use_cache''' ):
return
_snake_case = model_class(__lowerCamelCase ).to(__lowerCamelCase )
if "use_cache" not in inputs:
_snake_case = True
_snake_case = model(**__lowerCamelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_snake_case = (
getattr(__lowerCamelCase , '''decoder_layers''' , __lowerCamelCase )
or getattr(__lowerCamelCase , '''num_decoder_layers''' , __lowerCamelCase )
or config.num_hidden_layers
)
_snake_case = getattr(__lowerCamelCase , '''num_kv_heads''' , config.num_attention_heads )
_snake_case = getattr(__lowerCamelCase , '''d_model''' , config.hidden_size )
_snake_case = embed_dim // num_attention_heads
_snake_case = outputs['''past_key_values''']
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_snake_case , _snake_case = inputs['''input_ids'''].shape
for i in range(__lowerCamelCase ):
if config.new_decoder_architecture:
_snake_case = config.num_attention_heads
elif config.multi_query:
_snake_case = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
_snake_case = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(__lowerCamelCase )
_snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCamelCase )
_snake_case = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
_snake_case = model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=1_9 )
_snake_case = tokenizer.batch_decode(__lowerCamelCase )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_snake_case = AutoTokenizer.from_pretrained(__lowerCamelCase )
_snake_case = FalconForCausalLM.from_pretrained(__lowerCamelCase )
model.eval()
model.to(__lowerCamelCase )
_snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCamelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=4 )
model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=4 )
model.generate(**__lowerCamelCase , num_beams=2 , max_new_tokens=4 )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_snake_case = AutoTokenizer.from_pretrained(__lowerCamelCase )
_snake_case = FalconForCausalLM.from_pretrained(__lowerCamelCase )
model.eval()
model.to(device=__lowerCamelCase )
_snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCamelCase )
# Test results are the same with and without cache
_snake_case = model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=2_0 , use_cache=__lowerCamelCase )
_snake_case = model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=2_0 , use_cache=__lowerCamelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
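# Hedged invocation sketch: the suites above run under pytest; the path below
# mirrors the usual transformers test layout and is illustrative:
#
#   python -m pytest tests/models/falcon/test_modeling_falcon.py -k "generat"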
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
A : Optional[int] = int(_lowerCAmelCase )
# Initialize Result
A : int = []
# Traverse through all denomination
for denomination in reversed(_lowerCAmelCase ):
# Find denominations
while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ):
total_value -= int(_lowerCAmelCase )
answer.append(_lowerCAmelCase ) # Append the "answers" array
return answer
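# Worked example for the greedy routine above (assumes a canonical coin system,
# where the largest-denomination-first choice is optimal; the value arrives as
# a string and is converted with int()):
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]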
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = []
SCREAMING_SNAKE_CASE_:Dict = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
UpperCamelCase = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
version_str: str
description: Optional[str] = None
major: Optional[Union[str, int]] = None
minor: Optional[Union[str, int]] = None
patch: Optional[Union[str, int]] = None
def snake_case__ ( self ) -> Optional[int]:
self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> str:
return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
def snake_case__ ( self ) -> Union[str, Any]:
return self.major, self.minor, self.patch
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> int:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return Version(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return other
raise TypeError(f"""{other} (type {type(SCREAMING_SNAKE_CASE__ )}) cannot be compared to version.""" )
def __eq__( self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
try:
A__ = self._validate_operand(SCREAMING_SNAKE_CASE__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , SCREAMING_SNAKE_CASE__ ) -> str:
A__ = self._validate_operand(SCREAMING_SNAKE_CASE__ )
return self.tuple < other.tuple
def __hash__( self ) -> Dict:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def snake_case__ ( cls , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
field_names = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def snake_case__ ( self ) -> str:
return self.version_str
def _lowerCamelCase ( UpperCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
A__ = _VERSION_REG.match(UpperCAmelCase_ )
if not res:
raise ValueError(F"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
return tuple(int(UpperCAmelCase_ ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def _lowerCamelCase ( UpperCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
return ".".join(str(UpperCAmelCase_ ) for v in version_tuple )
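# Hedged usage sketch (mirrors `datasets.utils.version.Version`): total_ordering
# plus tuple comparison yields full semantic-version ordering from __eq__/__lt__.
def _version_demo():
    assert Version("1.2.3") < Version("1.10.0")  # (1, 2, 3) < (1, 10, 0)
    assert Version("2.0.0") == "2.0.0"           # strings are coerced to Version
    return _str_to_version_tuple("1.2.3")        # -> (1, 2, 3)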
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname)
SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE_:str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
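# Hedged follow-up sketch: once uploaded, the tiny checkpoint is consumed like
# any other hub model (the repo id matches `mname_tiny` above):
def _tiny_fsmt_demo():
    from transformers import FSMTForConditionalGeneration, FSMTTokenizer

    tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
    model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
    batch = tok(["Making tiny model"], return_tensors="pt")
    return model(**batch).logits.shape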
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
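# Added walk-through (informal, derived from the fixture above): with the toy merge
# table, BPE joins "l"+"o" -> "lo" (rank 123), then "lo"+"w" -> "low" (rank 1456),
# and "e"+"r</w>" -> "er</w>" (rank 1789), so "lower" splits into ["low", "er</w>"],
# whose vocab indices are 14 and 15; the out-of-vocabulary marker "<unk>" maps to 20.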
| 105 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)
    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
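# Example invocation (added; the script file name and checkpoint path are illustrative):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod_base/model.pt \
#       --pytorch_dump_folder_path ./converted-xmod-base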
| 662 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,) | 106 |
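# Hedged usage sketch (added): the tokenizer above only needs a trained
# SentencePiece model file; the local path here is hypothetical.
# tokenizer = AlbertTokenizer("spiece.model")
# pieces = tokenizer.tokenize("Hello, world!")  # SentencePiece pieces
# ids = tokenizer.build_inputs_with_special_tokens(tokenizer.convert_tokens_to_ids(pieces))  # adds [CLS]/[SEP]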
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        # prepares a list of PIL images from random uint8 arrays
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
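# Hedged usage sketch (added): outside the test harness, the processor bundles the
# image processor and tokenizer behind a single call; the checkpoint name is the
# public BLIP captioning model and is only illustrative here.
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")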
| 662 | 0 |
"""Masked BERT model configuration. It replicates `~transformers.BertConfig` and adds
pruning-specific parameters."""
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A class replicating `~transformers.BertConfig` with additional pruning parameters."""
    model_type = "masked_bert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
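# Minimal sketch (added): the config behaves like any PretrainedConfig; the three
# extra fields steer how the pruning masks are initialized and selected.
# config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
# print(config.pruning_method, config.mask_init, config.mask_scale)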
| 107 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image
    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params
    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
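# Hedged sketch (added): a single forward pass outside the parameterized tests has
# the same shape contract -- the UNet's `sample` output matches the latents' shape.
# model, params = FlaxUNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
# sample = model.apply({"params": params}, latents, jnp.array(4, dtype=jnp.int32),
#                      encoder_hidden_states=hidden_states).sample
# assert sample.shape == latents.shape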
| 662 | 0 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass
    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()
    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4)) | 108 |
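# Added worked check of the patch-embedding size formula used in
# create_and_check_model above, for the first stage of the 64x64 test input
# (padding 2, kernel 7, stride 4):
#   floor((64 + 2*2 - 7) / 4 + 1) = floor(16.25) = 16
# so the stage-1 feature map is 16x16 before the later stages shrink it further.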
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian."""
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    tests()
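# Added illustration: for a Hermitian matrix the Rayleigh quotient is real and
# bounded by the extreme eigenvalues; the helper name below is new to this demo.
def rayleigh_bounds_demo() -> None:
    a = np.array([[2, 0], [0, 5]])
    v = np.array([[1], [1]])
    r = rayleigh_quotient(a, v).item()  # (2 + 5) / 2 = 3.5
    eigenvalues = np.linalg.eigvalsh(a)
    assert eigenvalues[0] <= r <= eigenvalues[-1]
rayleigh_bounds_demo()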
| 662 | 0 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Returns the alternating arrangement of two strings."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
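# Added worked examples: characters are taken alternately until the shorter string
# runs out, then the rest of the longer one is appended.
assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
assert alternative_string_arrange("ABCD", "XY") == "AXBYCD"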
| 109 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Lower-upper (LU) decomposition of a square matrix, returning (lower, upper)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
    import doctest
    doctest.testmod()
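# Added sanity check (helper name is new): the factors must multiply back to the input.
def _lu_demo() -> None:
    matrix = np.array([[4.0, 3.0], [6.0, 3.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)
_lu_demo()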
| 662 | 0 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Counts the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 59 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, keep_aspect_ratio=False, ensure_multiple_of=1, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, keep_aspect_ratio=False, ensure_multiple_of=1, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, keep_aspect_ratio=None, ensure_multiple_of=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
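# Hedged mini-example (added): with keep_aspect_ratio=False and multiple=32, a
# 384x384 target is already a multiple of 32 and comes back unchanged;
# constraint_to_multiple_of only rounds sizes that don't divide evenly.
# new_h, new_w = get_resize_output_image_size(image, output_size=(384, 384),
#                                             keep_aspect_ratio=False, multiple=32)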
| 662 | 0 |
class EditDistance:
    """
    Computes the minimum number of single-character edits (insert, delete, replace)
    to turn one string into another, top-down (memoized) or bottom-up (tabulated).
    """
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []
    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]
    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()
    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()
    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()
    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 379 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")
    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 662 | 0 |
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> int:
'''simple docstring'''
def count_of_possible_combinations(UpperCAmelCase__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowerCAmelCase )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
UpperCAmelCase__,UpperCAmelCase__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
a__ = sum(
count_of_possible_combinations_with_dp_array(target - item,_lowerCAmelCase )
for item in array )
a__ = answer
return answer
a__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowerCAmelCase,_lowerCAmelCase )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> int:
'''simple docstring'''
a__ = [0] * (target + 1)
a__ = 1
for i in range(1,target + 1 ):
for j in range(_lowerCAmelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = 3
__magic_name__ = 5
__magic_name__ = [1, 2, 5]
print(combination_sum_iv(n, array, target))
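# Added consistency check: all three strategies count ordered combinations the same
# way; for array [1, 2, 5] and target 5 there are 9 of them.
assert (
    combination_sum_iv(3, [1, 2, 5], 5)
    == combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    == combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    == 9
)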
| 232 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
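# Minimal sketch (added): the defaults reproduce the BiT-50 layout; any field can be
# overridden at construction, e.g. bottleneck blocks with one backbone stage exposed.
# config = BitConfig(layer_type="bottleneck", out_features=["stage4"])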
| 662 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : int = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
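# Hedged illustration of the validation above (my example, not from the original file):
# GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
# GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # raises ValueError: factor must be > 1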
| 375 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
__lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_lm_model(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
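# Hedged note (my addition): the @slow integration tests above only run when slow tests
# are enabled, e.g. `RUN_SLOW=1 pytest <path/to/this/test/file>`; the concrete file path
# is not part of this snippet, so substitute your local checkout's test path.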
| 662 | 0 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so split the comma-separated input
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
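# Hedged non-interactive example of the Kadane-style scan above (my addition):
# SubArray("1,-2,3,4").solve_sub_array()  # -> 7, the sum of the best subarray [3, 4]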
| 671 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
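# Hedged illustration of the crop_pct branch above (my arithmetic, not from the original file):
# with size={"shortest_edge": 224} and the default crop_pct = 224 / 256, an image is first
# resized so its short side becomes int(224 / (224 / 256)) = 256, then center-cropped to 224;
# at shortest_edge >= 384 the image is instead warped straight to (384, 384) with no crop.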
| 662 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__lowercase = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
__lowercase = processed_features[self.model_input_names[0]]
__lowercase = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase__ ) == 0:
if return_attention_mask:
__lowercase = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__lowercase = required_input[0]
if isinstance(lowerCamelCase__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__lowercase = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase__ ):
__lowercase = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase__ ):
__lowercase = """tf"""
elif is_torch_tensor(lowerCamelCase__ ):
__lowercase = """pt"""
elif isinstance(lowerCamelCase__ , (int, float, list, tuple, np.ndarray) ):
__lowercase = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(lowerCamelCase__ )}. '''
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__lowercase = to_numpy(lowerCamelCase__ )
else:
__lowercase = [to_numpy(lowerCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
__lowercase = self._get_padding_strategies(padding=lowerCamelCase__ , max_length=lowerCamelCase__ )
__lowercase = processed_features[self.model_input_names[0]]
__lowercase = len(lowerCamelCase__ )
if not all(len(lowerCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__lowercase = []
for i in range(lowerCamelCase__ ):
__lowercase = {k: v[i] for k, v in processed_features.items()}
# truncation
__lowercase = self._truncate(
lowerCamelCase__ , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , truncation=lowerCamelCase__ , )
truncated_inputs.append(lowerCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__lowercase = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__lowercase = PaddingStrategy.MAX_LENGTH
__lowercase = {}
for i in range(lowerCamelCase__ ):
# padding
__lowercase = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
for key, value in outputs.items():
if key not in batch_outputs:
__lowercase = []
if value.dtype is np.dtype(np.floataa ):
__lowercase = value.astype(np.floataa )
batch_outputs[key].append(lowerCamelCase__ )
return BatchFeature(lowerCamelCase__ , tensor_type=lowerCamelCase__ )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ):
__lowercase = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__lowercase = len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowercase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowercase = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__lowercase = np.ones(len(lowerCamelCase__ ) , dtype=np.intaa )
if needs_to_be_padded:
__lowercase = max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
__lowercase = np.pad(
processed_features["attention_mask"] , (0, difference) )
__lowercase = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__lowercase = np.pad(
lowerCamelCase__ , lowerCamelCase__ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__lowercase = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__lowercase = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__lowercase = np.pad(
lowerCamelCase__ , lowerCamelCase__ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__lowercase = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowercase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowercase = len(lowerCamelCase__ ) > max_length
if needs_to_be_truncated:
__lowercase = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__lowercase = processed_features["""attention_mask"""][:max_length]
return processed_features
def snake_case__ ( self , lowerCAmelCase_=False , lowerCAmelCase_=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__lowercase = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowercase = PaddingStrategy(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowercase = padding
else:
__lowercase = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
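# Hedged usage sketch of the strategy resolution above (my addition; method names follow
# this class, not a separately documented public API):
# self._get_padding_strategies(padding=True)                          # -> PaddingStrategy.LONGEST
# self._get_padding_strategies(padding="max_length", max_length=16)   # -> PaddingStrategy.MAX_LENGTH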
| 321 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
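# Hedged CLI sketch (flag names come from the argparse definitions above; the script
# filename is my assumption -- substitute whatever this file is saved as):
# python convert_dit_checkpoint.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#     --pytorch_dump_folder_path ./dit-base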
| 662 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
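# Hedged CLI sketch (flags from the parser above; the script filename is my assumption):
# python convert_bigbird_pegasus_tf_to_pytorch.py \
#     --tf_ckpt_path /path/to/bigbird-pegasus/tf_ckpt \
#     --save_dir ./bigbird-pegasus-converted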
| 175 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
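# Hedged check (my addition): instantiating the deprecated alias should emit the warning above.
# import warnings as _w
# with _w.catch_warnings(record=True) as caught:
#     _w.simplefilter("always")
#     CLIPFeatureExtractor()
#     assert any("CLIPImageProcessor" in str(c.message) for c in caught)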
| 662 | 0 |
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the canvas by one generation of Conway's Game of Life."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
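# Hedged sanity check of the rules in __judge_point (my example, not from the original file):
# a live cell with exactly two live neighbours survives:
# __judge_point(True, [[True, True, False], [False, True, False], [False, False, False]])   # -> True
# a dead cell with exactly three live neighbours comes alive:
# __judge_point(False, [[True, True, False], [True, False, False], [False, False, False]])  # -> True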
| 295 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
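# Hedged usage note (my addition): in practice this reader backs the public entry point:
# from datasets import load_dataset
# ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")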
| 662 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase__ : List[Any] = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase):
@classmethod
def _snake_case ( cls : Optional[Any] ):
snake_case_ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def _snake_case ( cls : Optional[int] ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def _snake_case ( self : List[Any] ):
snake_case_ : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
snake_case_ : List[str] = FlaxBertModel(lowerCamelCase__ )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
snake_case_ : Dict = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax" )
snake_case_ : int = flatten_dict(unfreeze(model.params ) )
snake_case_ : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1E-3 , msg=f"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ , repo_id='''test-model-flax''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
snake_case_ : Dict = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax" )
snake_case_ : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
snake_case_ : int = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case_ : Optional[int] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1E-3 , msg=f"{key} not identical" )
def _snake_case ( self : List[str] ):
snake_case_ : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
snake_case_ : str = FlaxBertModel(lowerCamelCase__ )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
snake_case_ : Any = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
snake_case_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
snake_case_ : Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case_ : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1E-3 , msg=f"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCamelCase__ , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
snake_case_ : Optional[int] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
snake_case_ : Any = flatten_dict(unfreeze(model.params ) )
snake_case_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
snake_case_ : Optional[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1E-3 , msg=f"{key} not identical" )
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Optional[int] ):
snake_case_ : int = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
snake_case_ : Any = FlaxBertModel(lowerCamelCase__ )
snake_case_ : Tuple = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) )
with self.assertRaises(lowerCamelCase__ ):
snake_case_ : Tuple = FlaxBertModel.from_pretrained(lowerCamelCase__ )
snake_case_ : Union[str, Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertTrue(check_models_equal(lowerCamelCase__ , lowerCamelCase__ ) )
def _snake_case ( self : Optional[Any] ):
snake_case_ : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
snake_case_ : List[str] = FlaxBertModel(lowerCamelCase__ )
snake_case_ : Dict = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , max_shard_size='''10KB''' )
with self.assertRaises(lowerCamelCase__ ):
snake_case_ : int = FlaxBertModel.from_pretrained(lowerCamelCase__ )
snake_case_ : Optional[Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertTrue(check_models_equal(lowerCamelCase__ , lowerCamelCase__ ) )
def _snake_case ( self : Optional[int] ):
snake_case_ : Tuple = """bert"""
snake_case_ : List[str] = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(lowerCamelCase__ ):
snake_case_ : Union[str, Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ )
snake_case_ : int = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _snake_case ( self : Optional[int] ):
snake_case_ : List[str] = """bert"""
snake_case_ : Dict = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(lowerCamelCase__ ):
snake_case_ : Optional[int] = FlaxBertModel.from_pretrained(lowerCamelCase__ )
snake_case_ : int = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
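# Hedged self-check of check_models_equal above (my example):
# config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
# model = FlaxBertModel(config)
# assert check_models_equal(model, model)  # identical parameter trees compare equal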
| 123 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 662 | 0 |
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
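# Hedged note (my addition): on the 7x7 grid above, the demo prints the grid, runs greedy
# best-first search from (0, 0) to (6, 6), and re-prints the grid with the found path marked
# as 2; the heuristic is the Manhattan distance computed in calculate_heuristic.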
| 382 |
def solution(n: int = 1000) -> int:
    """Project Euler 57: count how many of the first ``n`` expansions of the continued
    fraction for sqrt(2) have a numerator with more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
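# Worked check (my addition): the expansions run 3/2, 7/5, 17/12, 41/29, 99/70, 239/169,
# 577/408, 1393/985, ...; 1393/985 at iteration 8 is the first with a longer numerator,
# so solution(8) == 1.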
| 662 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}
CONTROL_CODES = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word (represented as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """CTRL BPE tokenizer, backed by a vocab.json / merges.txt pair."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string by splitting on whitespace and applying BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
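# A minimal usage sketch (hypothetical file paths; assumes a CTRL-style
# vocab.json / merges.txt pair is available locally):
#
#   tok = CTRLTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   tokens = tok._tokenize("Links Hello world")
#   ids = [tok._convert_token_to_id(t) for t in tokens]
#   round_trip = tok.convert_tokens_to_string(tokens)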
| 654 |
import re
def dna(dna: str) -> str:
    """Returns the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
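# Example, following the translation table above:
# dna("ATCG") returns "TAGC"; a strand containing any other character,
# e.g. dna("ATXG"), raises ValueError("Invalid Strand").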
| 662 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
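# A minimal usage sketch (assumes network access to download the checkpoints
# named above; the result is a 1-D waveform tensor):
#
#   tool = TextToSpeechTool()
#   audio = tool("Hello world")  # Tool.__call__ chains encode -> forward -> decode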
| 59 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class Trie:
    def __init__(self):
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    """Returns all words in the trie that start with `string`."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
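# Expected output of main() for the word list above (each completion carries
# a trailing space, the printable stand-in for the END marker):
# ('depart ', 'detergent ', 'deer ', 'deal ')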
| 662 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 379 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_abit
A : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Tuple = load_in_abit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A : Optional[Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert(model):
    """Find modules (e.g. a tied lm_head) that should stay in full precision."""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
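# Illustrative note: for a causal LM whose lm_head is tied to the input
# embeddings, get_keys_to_not_convert(model) typically returns something like
# ["lm_head"], so that module is skipped during bnb layer replacement
# (the exact keys depend on the architecture).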
def has_4bit_bnb_layers(model):
    """Check if we have `bnb.nn.Linear4bit` layers inside our model."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # Name and parameter names follow accelerate's bnb utilities; the 8-bit
    # quantization statistics (SCB) are offloaded alongside the weight.
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 662 | 0 |
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase__ ) -> int:
'''simple docstring'''
a__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCamelCase ( UpperCAmelCase__ = 1_00 ) -> int:
'''simple docstring'''
a__ = 1
a__ = 2
for i in range(2,max_n + 1 ):
a__ = pre_numerator
a__ = 2 * i // 3 if i % 3 == 0 else 1
a__ = cur_numerator
a__ = e_cont * pre_numerator + temp
return sum_digits(_lowerCAmelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
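# Sanity check (Project Euler's worked example: the 10th convergent of e is
# 1457/536, and 1 + 4 + 5 + 7 == 17):
# assert solution(10) == 17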
| 232 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 662 | 0 |
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Checks whether `path` can be extended with vertex `next_ver`."""
    # 1. Validate that current and next vertex are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, representing not-yet-visited vertices
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
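# Example reproduced from the original doctests (adjacency matrix of a graph
# that contains a Hamiltonian cycle starting and ending at vertex 0):
#
#   graph = [[0, 1, 0, 1, 0],
#            [1, 0, 1, 1, 1],
#            [0, 1, 0, 0, 1],
#            [1, 1, 0, 0, 1],
#            [0, 1, 1, 1, 0]]
#   hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0]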
| 375 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
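# Note: with the _LazyModule indirection above, `import transformers.models.blenderbot`
# stays cheap; heavy submodules such as modeling_blenderbot are only imported on
# first attribute access, while the TYPE_CHECKING branch gives static type
# checkers the real symbols.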
| 662 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
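# To run this suite from a transformers source checkout (the path is an
# assumption based on the upstream repo layout; slow tests additionally need
# RUN_SLOW=1 and network access):
#   python -m pytest tests/models/regnet/test_modeling_regnet.py -q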
| 671 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily builds the change for `value` from the given denominations."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 662 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 321 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
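# The generated checkpoint can then be consumed like any other (assuming it was
# uploaded under the name used upstream):
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")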
| 662 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
a_ = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 175 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the fairseq X-MOD checkpoint weights into our model design."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
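# Illustrative invocation (a sketch; the paths below are placeholders, not real files):
#
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod_base/model.pt \
#       --pytorch_dump_folder_path ./converted-xmod-base
#
# Pass --classification_head when the fairseq checkpoint carries an MNLI head.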
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def A_ ( self : Optional[Any] ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = self.get_tokenizer()
lowerCamelCase__ : Union[str, Any] = self.get_feature_extractor()
lowerCamelCase__ : List[Any] = self.get_decoder()
lowerCamelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , decoder=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCamelCase__ )
def A_ ( self : Tuple ) -> Optional[int]:
lowerCamelCase__ : List[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCamelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def A_ ( self : Optional[int] ) -> Optional[int]:
lowerCamelCase__ : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(lowerCamelCase__ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=lowerCamelCase__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def A_ ( self : Optional[Any] ) -> Tuple:
lowerCamelCase__ : Optional[int] = self.get_feature_extractor()
lowerCamelCase__ : Dict = self.get_tokenizer()
lowerCamelCase__ : Optional[int] = self.get_decoder()
lowerCamelCase__ : str = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , decoder=lowerCamelCase__ )
lowerCamelCase__ : List[str] = floats_list((3, 1000) )
lowerCamelCase__ : Optional[Any] = feature_extractor(lowerCamelCase__ , return_tensors='np' )
lowerCamelCase__ : Dict = processor(lowerCamelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCamelCase__ : Any = self.get_feature_extractor()
lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
lowerCamelCase__ : List[str] = self.get_decoder()
lowerCamelCase__ : int = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , decoder=lowerCamelCase__ )
lowerCamelCase__ : Tuple = """This is a test string"""
lowerCamelCase__ : List[str] = processor(text=lowerCamelCase__ )
lowerCamelCase__ : Optional[int] = tokenizer(lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def A_ ( self : Dict ) -> Dict:
lowerCamelCase__ : List[str] = self.get_feature_extractor()
lowerCamelCase__ : Tuple = self.get_tokenizer()
lowerCamelCase__ : Any = self.get_decoder()
lowerCamelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , decoder=lowerCamelCase__ )
lowerCamelCase__ : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowerCamelCase__ : List[str] = processor.decode(lowerCamelCase__ )
lowerCamelCase__ : Tuple = decoder.decode_beams(lowerCamelCase__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)
        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def A_ ( self : List[Any] ) -> Tuple:
lowerCamelCase__ : List[str] = self.get_feature_extractor()
lowerCamelCase__ : Dict = self.get_tokenizer()
lowerCamelCase__ : List[str] = self.get_decoder()
lowerCamelCase__ : List[str] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , decoder=lowerCamelCase__ )
lowerCamelCase__ : Tuple = self._get_dummy_logits()
lowerCamelCase__ : int = 15
lowerCamelCase__ : List[Any] = -2_0.0
lowerCamelCase__ : Optional[int] = -4.0
lowerCamelCase__ : Dict = processor.batch_decode(
lowerCamelCase__ , beam_width=lowerCamelCase__ , beam_prune_logp=lowerCamelCase__ , token_min_logp=lowerCamelCase__ , )
lowerCamelCase__ : List[str] = decoded_processor_out.text
lowerCamelCase__ : Tuple = list(lowerCamelCase__ )
with get_context('fork' ).Pool() as pool:
lowerCamelCase__ : str = decoder.decode_beams_batch(
lowerCamelCase__ , lowerCamelCase__ , beam_width=lowerCamelCase__ , beam_prune_logp=lowerCamelCase__ , token_min_logp=lowerCamelCase__ , )
lowerCamelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
lowerCamelCase__ : str = [d[0][2] for d in decoded_decoder_out]
lowerCamelCase__ : Any = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , lowerCamelCase__ )
self.assertTrue(np.array_equal(lowerCamelCase__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , lowerCamelCase__ , atol=1e-3 ) )
self.assertTrue(np.array_equal(lowerCamelCase__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , lowerCamelCase__ , atol=1e-3 ) )
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : str = self.get_feature_extractor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : int = self.get_decoder()
lowerCamelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , decoder=lowerCamelCase__ )
lowerCamelCase__ : int = self._get_dummy_logits()
lowerCamelCase__ : Dict = 2.0
lowerCamelCase__ : Optional[Any] = 5.0
lowerCamelCase__ : Optional[Any] = -2_0.0
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : List[Any] = processor.batch_decode(
lowerCamelCase__ , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , unk_score_offset=lowerCamelCase__ , lm_score_boundary=lowerCamelCase__ , )
lowerCamelCase__ : Optional[int] = decoded_processor_out.text
lowerCamelCase__ : Tuple = list(lowerCamelCase__ )
decoder.reset_params(
alpha=lowerCamelCase__ , beta=lowerCamelCase__ , unk_score_offset=lowerCamelCase__ , lm_score_boundary=lowerCamelCase__ , )
with get_context('fork' ).Pool() as pool:
lowerCamelCase__ : List[Any] = decoder.decode_beams_batch(
lowerCamelCase__ , lowerCamelCase__ , )
lowerCamelCase__ : List[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , lowerCamelCase__ )
lowerCamelCase__ : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , lowerCamelCase__ )
def A_ ( self : Optional[int] ) -> str:
lowerCamelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowerCamelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase__ : Tuple = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
lowerCamelCase__ : Dict = os.listdir(lowerCamelCase__ )
lowerCamelCase__ : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def A_ ( self : Optional[int] ) -> Optional[Any]:
lowerCamelCase__ : List[Any] = snapshot_download('hf-internal-testing/processor_with_lm' )
lowerCamelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(lowerCamelCase__ )
lowerCamelCase__ : str = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase__ : Any = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
lowerCamelCase__ : str = os.listdir(lowerCamelCase__ )
lowerCamelCase__ : List[str] = os.listdir(lowerCamelCase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def A_ ( self : Optional[int] ) -> Any:
lowerCamelCase__ : int = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowerCamelCase__ : Dict = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
lowerCamelCase__ : List[Any] = floats_list((3, 1000) )
lowerCamelCase__ : List[Any] = processor_wavaveca(lowerCamelCase__ , return_tensors='np' )
lowerCamelCase__ : Union[str, Any] = processor_auto(lowerCamelCase__ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
lowerCamelCase__ : Dict = self._get_dummy_logits()
lowerCamelCase__ : Optional[Any] = processor_wavaveca.batch_decode(lowerCamelCase__ )
lowerCamelCase__ : str = processor_auto.batch_decode(lowerCamelCase__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def A_ ( self : Dict ) -> Dict:
lowerCamelCase__ : str = self.get_feature_extractor()
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : int = self.get_decoder()
lowerCamelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , decoder=lowerCamelCase__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def A_ ( UpperCAmelCase : int , UpperCAmelCase : str ) -> Union[str, Any]:
lowerCamelCase__ : str = [d[key] for d in offsets]
return retrieved_list
def A_ ( self : Optional[int] ) -> Tuple:
lowerCamelCase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowerCamelCase__ : Optional[Any] = self._get_dummy_logits()[0]
lowerCamelCase__ : Dict = processor.decode(lowerCamelCase__ , output_word_offsets=lowerCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def A_ ( self : Dict ) -> Optional[int]:
lowerCamelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowerCamelCase__ : Optional[Any] = self._get_dummy_logits()
lowerCamelCase__ : int = processor.batch_decode(lowerCamelCase__ , output_word_offsets=lowerCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(lowerCamelCase__ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def A_ ( self : Any ) -> List[str]:
import torch
lowerCamelCase__ : Dict = load_dataset('common_voice' , 'en' , split='train' , streaming=lowerCamelCase__ )
lowerCamelCase__ : List[str] = ds.cast_column('audio' , datasets.Audio(sampling_rate=16000 ) )
lowerCamelCase__ : List[str] = iter(lowerCamelCase__ )
lowerCamelCase__ : int = next(lowerCamelCase__ )
lowerCamelCase__ : List[Any] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
lowerCamelCase__ : Optional[Any] = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCamelCase__ : Tuple = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase__ ).logits.cpu().numpy()
lowerCamelCase__ : Any = processor.decode(logits[0] , output_word_offsets=lowerCamelCase__ )
lowerCamelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCamelCase__ : List[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
lowerCamelCase__ : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(' '.join(self.get_from_offsets(lowerCamelCase__ , 'word' ) ) , lowerCamelCase__ )
self.assertEqual(' '.join(self.get_from_offsets(lowerCamelCase__ , 'word' ) ) , output.text )
# output times
lowerCamelCase__ : Dict = torch.tensor(self.get_from_offsets(lowerCamelCase__ , 'start_time' ) )
lowerCamelCase__ : List[str] = torch.tensor(self.get_from_offsets(lowerCamelCase__ , 'end_time' ) )
# fmt: off
lowerCamelCase__ : Tuple = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
lowerCamelCase__ : List[Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=0.0_1 ) )
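# Illustrative use of the pooled decoding path exercised by the tests above
# (a minimal sketch using this file's naming; `processor` stands for a loaded
# WavaVecaProcessorWithLM and `logits` for a (batch, time, vocab) numpy array):
#
#   from multiprocessing import get_context
#   with get_context("fork").Pool() as pool:  # create the pool *after* the processor
#       transcriptions = processor.batch_decode(logits, pool).text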
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
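# Illustrative end-to-end use of the processor under test (a minimal sketch;
# the checkpoint name is an assumption -- any BLIP checkpoint will do):
#
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(text="lower newer", images=image, return_tensors="pt")  # `image` is any PIL RGB image
#   print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']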
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
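# Illustrative round trip (a minimal sketch; the checkpoint name comes from the
# PRETRAINED_VOCAB_FILES_MAP above and downloads the SentencePiece model):
#
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tok("Crime and Punishment")["input_ids"]
#   print(tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids)))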
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
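# Standalone shape check mirroring the tests above (a minimal sketch; needs a
# JAX device and the bf16 branch of the CompVis checkpoint):
#
#   model, params = FlaxUNet2DConditionModel.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", subfolder="unet", revision="bf16", dtype=jnp.bfloat16
#   )
#   latents = jnp.zeros((1, 4, 64, 64), dtype=jnp.bfloat16)
#   context = jnp.zeros((1, 77, 768), dtype=jnp.bfloat16)
#   sample = model.apply(
#       {"params": params}, latents, jnp.array(10, dtype=jnp.int32), encoder_hidden_states=context
#   ).sample
#   assert sample.shape == latents.shape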
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
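# Illustrative instantiation (a minimal sketch): the defaults reproduce the
# SenseTime/deformable-detr architecture, and enabling the two-stage variant
# requires box refinement, as enforced in __init__ above.
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   print(config.hidden_size, config.num_attention_heads)  # 256 8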
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of the Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
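# Extra sanity check (a minimal sketch): the Rayleigh quotient of a true
# eigenvector recovers the corresponding eigenvalue.
#
#   m = np.array([[2.0, 1.0], [1.0, 2.0]])
#   eigenvalues, eigenvectors = np.linalg.eigh(m)
#   assert np.isclose(rayleigh_quotient(m, eigenvectors[:, [0]]), eigenvalues[0])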
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition (no pivoting) of a square matrix."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
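# Quick verification (a minimal sketch): for a matrix whose leading principal
# minors are non-zero, the factors multiply back to the input.
#
#   matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#   lower, upper = lower_upper_decomposition(matrix)
#   assert np.allclose(lower @ upper, matrix)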
from math import asin, atan, cos, radians, sin, sqrt, tan
# Ellipsoid constants (WGS84): equatorial axis, polar axis, and mean radius
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Great-circle distance in metres between two points, with latitudes
    corrected for the flattening of the Earth ellipsoid.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
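# Illustrative usage (a sketch; the coordinates are approximate values for
# San Francisco and New York City, and the result is in metres):
#
#   print(haversine_distance(37.774856, -122.424227, 40.713019, -74.012647))  # ~4.1e6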
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
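# Illustrative usage (a minimal sketch; the size/multiple values are chosen for
# demonstration, and `image` stands for any PIL RGB image):
#
#   processor = DPTImageProcessor(size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32)
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   print(pixel_values.shape)  # height and width land on multiples of 32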
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=51865, num_mel_bins=80,
        encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4,
        decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=256,
        dropout=0.0, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448,
        pad_token_id=50256, bos_token_id=50256, eos_token_id=50256,
        suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class UpperCamelCase__ ( OnnxSeqaSeqConfigWithPast ):
"""simple docstring"""
@property
def inputs( self ):
'''simple docstring'''
common_inputs = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
common_inputs["""decoder_input_ids"""] = {0: """batch"""}
else:
common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
return common_inputs
def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , sampling_rate = 2_20_50 , time_duration = 5.0 , frequency = 2_20 , ):
'''simple docstring'''
dummy_inputs = OrderedDict()
encoder_inputs = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
encoder_sequence_length = encoder_inputs["""input_features"""].shape[2]
seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
decoder_inputs = super().generate_dummy_inputs(
preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
dummy_inputs["""input_features"""] = encoder_inputs.pop("""input_features""" )
dummy_inputs["""decoder_input_ids"""] = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
dummy_inputs["""past_key_values"""] = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def atol_for_validation( self ):
'''simple docstring'''
return 1e-3
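# Added sketch of what the suppress-token id lists above are for: during
# generation those ids are masked to -inf so they can never be sampled. The
# helper below is an illustrative assumption, not part of this module.
import torch
def suppress_token_logits(logits , suppress_ids ):
logits = logits.clone()
logits[..., suppress_ids] = float("""-inf""" ) # suppressed ids get zero probability
return logits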
| 379 |
class SubArray:
'''simple docstring'''
def __init__( self, arr ):
# we need a list not a string, so do something to change the type
self.array = arr.split(""",""" )
def solve_sub_array( self ):
rear = [int(self.array[0] )] * len(self.array )
sum_value = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
sum_value[i] = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
rear[i] = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
whole_array = input("""please input some numbers:""")
array = SubArray(whole_array)
re = array.solve_sub_array()
print("""the result is:""", re)
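# Added cross-check: the classic one-pass Kadane's algorithm computes the same
# maximum-subarray sum as the two-array DP above; assumes a plain list of ints.
def kadane(nums ):
best = cur = nums[0]
for x in nums[1:]:
cur = max(x, cur + x ) # extend the current run or restart at x
best = max(best, cur )
return best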
| 662 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
'''simple docstring'''
size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
def prepare_image_processor_dict( self ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def get_expected_values( self , image_inputs , batched=False ):
'''simple docstring'''
if not batched:
image = image_inputs[0]
if isinstance(image , Image.Image ):
w , h = image.size
else:
h , w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size['shortest_edge'] * h / w )
expected_width = self.size["""shortest_edge"""]
elif w > h:
expected_height = self.size["""shortest_edge"""]
expected_width = int(self.size['shortest_edge'] * w / h )
else:
expected_height = self.size["""shortest_edge"""]
expected_width = self.size["""shortest_edge"""]
else:
expected_values = []
for image in image_inputs:
expected_height , expected_width = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
expected_height = max(expected_values , key=lambda item : item[0] )[0]
expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
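# Added worked example of the shortest-edge rule above: for h=480, w=640 and
# shortest_edge=18, w > h, so the expected size is (18, int(18 * 640 / 480)) ==
# (18, 24) -- the aspect ratio is preserved and the short side hits the target.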
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
a_ : str =DetaImageProcessor if is_vision_available() else None
def setUp( self ):
'''simple docstring'''
self.image_processor_tester = DetaImageProcessingTester(self )
@property
def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
a__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , 'image_mean' ) )
self.assertTrue(hasattr(lowerCamelCase__ , 'image_std' ) )
self.assertTrue(hasattr(lowerCamelCase__ , 'do_normalize' ) )
self.assertTrue(hasattr(lowerCamelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCamelCase__ , 'do_rescale' ) )
self.assertTrue(hasattr(lowerCamelCase__ , 'do_pad' ) )
self.assertTrue(hasattr(lowerCamelCase__ , 'size' ) )
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
a__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase__ )
def _lowerCAmelCase ( self : int ) -> Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
a__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
a__ = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
a__ = image_processing(lowerCamelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
a__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
a__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
a__ = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ = image_processing(lowerCamelCase__ , return_tensors='pt' ).pixel_values
a__ = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
a__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
a__ = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ = image_processing(lowerCamelCase__ , return_tensors='pt' ).pixel_values
a__ = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
a__ = json.loads(f.read() )
a__ = {"""image_id""": 3_9769, """annotations""": target}
# encode them
a__ = DetaImageProcessor()
a__ = image_processing(images=lowerCamelCase__ , annotations=lowerCamelCase__ , return_tensors='pt' )
# verify pixel values
a__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , lowerCamelCase__ )
a__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCamelCase__ , atol=1E-4 ) )
# verify area
a__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCamelCase__ ) )
# verify boxes
a__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCamelCase__ )
a__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCamelCase__ , atol=1E-3 ) )
# verify image_id
a__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCamelCase__ ) )
# verify is_crowd
a__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCamelCase__ ) )
# verify class_labels
a__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCamelCase__ ) )
# verify orig_size
a__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCamelCase__ ) )
# verify size
a__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCamelCase__ ) )
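# For reference (added): the detection annotations consumed above follow the
# COCO format; keys below are the ones the processor reads, values illustrative.
# {"image_id": 39769, "annotations": [
# {"bbox": [x, y, width, height], "category_id": 75, "area": 100.0, "iscrowd": 0}]}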
@slow
def _lowerCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
a__ = json.loads(f.read() )
a__ = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
a__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
a__ = DetaImageProcessor(format='coco_panoptic' )
a__ = image_processing(images=lowerCamelCase__ , annotations=lowerCamelCase__ , masks_path=lowerCamelCase__ , return_tensors='pt' )
# verify pixel values
a__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , lowerCamelCase__ )
a__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCamelCase__ , atol=1E-4 ) )
# verify area
a__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCamelCase__ ) )
# verify boxes
a__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCamelCase__ )
a__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCamelCase__ , atol=1E-3 ) )
# verify image_id
a__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCamelCase__ ) )
# verify is_crowd
a__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCamelCase__ ) )
# verify class_labels
a__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCamelCase__ ) )
# verify masks
a__ = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowerCamelCase__ )
# verify orig_size
a__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCamelCase__ ) )
# verify size
a__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCamelCase__ ) )
| 232 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "bit"
__lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"]
__lowerCamelCase : Union[str, Any] = ["SAME", "VALID"]
def __init__( self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs, ):
super().__init__(**kwargs )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
global_padding = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
self.num_channels = num_channels
self.embedding_size = embedding_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.layer_type = layer_type
self.hidden_act = hidden_act
self.global_padding = global_padding
self.num_groups = num_groups
self.drop_path_rate = drop_path_rate
self.embedding_dynamic_padding = embedding_dynamic_padding
self.output_stride = output_stride
self.width_factor = width_factor
self.stage_names = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(depths ) + 1 )]
self._out_features , self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
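# Added sketch (hedged) of the core mapping the alignment helper above performs:
# it returns both stage names and indices, defaulting to the last stage when
# neither is given. The real utility also validates ordering and membership.
def align_output_features_indices(out_features , out_indices , stage_names ):
if out_features is None and out_indices is None:
return [stage_names[-1]] , [len(stage_names ) - 1]
if out_features is None:
out_features = [stage_names[i] for i in out_indices]
if out_indices is None:
out_indices = [stage_names.index(name ) for name in out_features]
return out_features , out_indices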
| 662 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
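# Added self-contained check of the path filter built above: only .py files under
# the listed top-level directories match.
_demo_regex = re.compile(r"""^(src|tests).*?\.py$""")
assert _demo_regex.match("""src/foo.py""") and not _demo_regex.match("""docs/x.py""")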
| 375 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None, ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.use_labels = use_labels
self.scope = scope
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
config = self.get_config()
return config, input_ids, input_mask, token_labels
def get_config( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
def prepare_config_and_inputs_for_decoder( self ):
config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : str = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, )
A : Optional[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : Union[str, Any] = True
A : Optional[int] = True
A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
A : int = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, )
A : List[str] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
A : int = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 )
A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 )
A : List[str] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
# select random slice
A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
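# Added note: the slice comparison above is the standard KV-cache consistency
# check -- a forward pass that reuses past_key_values on only the new tokens
# must match a full-sequence forward pass on the new positions to ~1e-3.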
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ):
A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def prepare_config_and_inputs_for_common( self ):
config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self ):
A : Any = BertGenerationEncoderTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
config.model_type = """bert"""
self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
# This regression test was failing with PyTorch < 1.3
config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, )
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Union[str, Any] = model(lowerCamelCase__ )[0]
A : List[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Dict = model(lowerCamelCase__ )[0]
A : List[str] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[Any] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
| 662 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowercase ( PretrainedConfig ):
"""simple docstring"""
_UpperCAmelCase : int = "mobilenet_v2"
def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=255 , **kwargs , ):
super().__init__(**kwargs)
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero.")
self.num_channels = num_channels
self.image_size = image_size
self.depth_multiplier = depth_multiplier
self.depth_divisible_by = depth_divisible_by
self.min_depth = min_depth
self.expand_ratio = expand_ratio
self.output_stride = output_stride
self.first_layer_is_expansion = first_layer_is_expansion
self.finegrained_output = finegrained_output
self.hidden_act = hidden_act
self.tf_padding = tf_padding
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowercase ( OnnxConfig ):
"""simple docstring"""
torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def inputs( self ):
return OrderedDict([("pixel_values", {0: "batch"})])
@property
def outputs( self ):
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})])
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
@property
def atol_for_validation( self ):
return 1E-4
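# Added sketch (hedged): depth_divisible_by and min_depth control the canonical
# MobileNet channel-rounding rule; the helper below is the standard
# "make_divisible" recipe, not code taken from this module.
def make_divisible(value , divisor=8 , min_value=None ):
if min_value is None:
min_value = divisor
new_value = max(min_value , int(value + divisor / 2 ) // divisor * divisor )
if new_value < 0.9 * value: # never round down by more than 10%
new_value += divisor
return int(new_value )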
| 671 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( BaseImageProcessor ):
'''simple docstring'''
__lowerCamelCase : str = ["pixel_values"]
def __init__( self, do_resize = True, size = None, crop_pct = None, resample = PILImageResampling.BILINEAR, do_rescale = True, rescale_factor = 1 / 255, do_normalize = True, image_mean = None, image_std = None, **kwargs, ):
super().__init__(**kwargs )
size = size if size is not None else {"""shortest_edge""": 384}
size = get_size_dict(size, default_to_square=False )
self.do_resize = do_resize
self.size = size
# Default value set here for backwards compatibility where the value in config is None
self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize( self, image, size, crop_pct, resample = PILImageResampling.BICUBIC, data_format = None, **kwargs, ):
size = get_size_dict(size, default_to_square=False )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
shortest_edge = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
resize_shortest_edge = int(shortest_edge / crop_pct )
resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False )
image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs )
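# Added worked example: with shortest_edge=224 and crop_pct=224/256, the image
# is first resized so its short side is int(224 / (224 / 256)) == 256, then
# center-cropped to 224x224 -- the standard "resize to 256, crop to 224" recipe.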
def rescale( self, image, scale, data_format = None, **kwargs, ):
return rescale(image, scale=scale, data_format=data_format, **kwargs )
def normalize( self, image, mean, std, data_format = None, **kwargs, ):
return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs )
def preprocess( self, images, do_resize = None, size = None, crop_pct = None, resample = None, do_rescale = None, rescale_factor = None, do_normalize = None, image_mean = None, image_std = None, return_tensors = None, data_format = ChannelDimension.FIRST, **kwargs, ):
do_resize = do_resize if do_resize is not None else self.do_resize
crop_pct = crop_pct if crop_pct is not None else self.crop_pct
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False )
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample ) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor ) for image in images]
if do_normalize:
images = [self.normalize(image=image, mean=image_mean, std=image_std ) for image in images]
images = [to_channel_dimension_format(image, data_format ) for image in images]
data = {"""pixel_values""": images}
return BatchFeature(data=data, tensor_type=return_tensors )
| 662 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case :
"""simple docstring"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase = LlamaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowercase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
__lowercase = True
__lowercase = LlamaModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , )
__lowercase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
__lowercase = LlamaForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
__lowercase = True
__lowercase = True
__lowercase = LlamaForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
__lowercase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ , )
__lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowercase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
__lowercase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
__lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__lowerCAmelCase = (LlamaForCausalLM,) if is_torch_available() else ()
__lowerCAmelCase = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
def snake_case__ ( self ):
__lowercase = LlamaModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = input_dict["""input_ids"""]
__lowercase = input_ids.ne(1 ).to(lowerCamelCase__ )
__lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowercase = LlamaForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = """single_label_classification"""
__lowercase = input_dict["""input_ids"""]
__lowercase = input_ids.ne(1 ).to(lowerCamelCase__ )
__lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowercase = LlamaForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = """multi_label_classification"""
__lowercase = input_dict["""input_ids"""]
__lowercase = input_ids.ne(1 ).to(lowerCamelCase__ )
__lowercase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowercase = LlamaForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def snake_case__ ( self ):
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def snake_case__ ( self , lowerCAmelCase_ ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = ids_tensor([1, 10] , config.vocab_size )
__lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase = LlamaModel(lowerCamelCase__ )
original_model.to(lowerCamelCase__ )
original_model.eval()
__lowercase = original_model(lowerCamelCase__ ).last_hidden_state
__lowercase = original_model(lowerCamelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase = {"""type""": scaling_type, """factor""": 10.0}
__lowercase = LlamaModel(lowerCamelCase__ )
scaled_model.to(lowerCamelCase__ )
scaled_model.eval()
__lowercase = scaled_model(lowerCamelCase__ ).last_hidden_state
__lowercase = scaled_model(lowerCamelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-5 ) )
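# Added sketch of the linear RoPE scaling exercised above: positions are divided
# by the scaling factor before the rotary angles are computed, stretching the
# usable context. The helper below is illustrative, not the library's internals.
def rope_angles(positions , dim , base=10000.0 , factor=1.0 ):
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2 ).float() / dim ))
return torch.outer(positions.float() / factor , inv_freq ) # (seq_len, dim // 2)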
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def snake_case__ ( self ):
__lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__lowercase = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
__lowercase = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__lowercase = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__lowercase = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def snake_case__ ( self ):
__lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__lowercase = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
__lowercase = model(torch.tensor(lowerCamelCase__ ) )
# Expected mean on dim = -1
__lowercase = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__lowercase = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def snake_case__ ( self ):
__lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__lowercase = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
__lowercase = model(torch.tensor(lowerCamelCase__ ) )
# Expected mean on dim = -1
__lowercase = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__lowercase = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase__ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def snake_case__ ( self ):
__lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__lowercase = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
__lowercase = model(torch.tensor(lowerCamelCase__ ) )
__lowercase = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase__ , atol=1E-2 , rtol=1E-2 )
# fmt: off
__lowercase = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def snake_case__ ( self ):
__lowercase = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
__lowercase = """Simply put, the theory of relativity states that """
__lowercase = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
__lowercase = tokenizer.encode(lowerCamelCase__ , return_tensors="pt" )
__lowercase = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=lowerCamelCase__ )
# greedy generation outputs
__lowercase = model.generate(lowerCamelCase__ , max_new_tokens=64 , top_p=lowerCamelCase__ , temperature=1 , do_sample=lowerCamelCase__ )
__lowercase = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 321 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ) -> Any:
"""simple docstring"""
prefix = """backbone.""" if is_semantic else """"""
rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
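# Example of one (old_key, new_key) pair produced above (illustrative, i=0, no prefix):
#   ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")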
def read_in_q_k_v ( state_dict , config , has_lm_head=False , is_semantic=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A : Dict = """backbone.""" if is_semantic else """"""
# queries, keys and values
A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
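        # The original checkpoint fuses q/k/v into a single `qkv.weight` matrix, and
        # BEiT's key projection carries no bias, so only `q_bias` and `v_bias` are popped.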
A : int = in_proj_weight[
: config.hidden_size, :
]
A : Any = q_bias
A : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A : Dict = gamma_a
A : Dict = gamma_a
def rename_key ( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img ( ):
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint ( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
"""simple docstring"""
    A : Dict = """rvlcdip""" not in checkpoint_url
A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A : Dict = 1024
A : List[Any] = 4096
A : int = 24
A : int = 16
# labels
if "rvlcdip" in checkpoint_url:
A : List[Any] = 16
A : List[Any] = """huggingface/label-files"""
A : int = """rvlcdip-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
A : int = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
A : int = prepare_img()
A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A : str = encoding["""pixel_values"""]
A : Tuple = model(_lowerCAmelCase )
A : Optional[int] = outputs.logits
# verify logits
A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
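# Example invocation (script filename assumed, output path hypothetical):
#   python convert_dit_unilm_to_pytorch.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#     --pytorch_dump_folder_path ./dit-base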
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 175 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class CLIPFeatureExtractor ( CLIPImageProcessor ):
    '''simple docstring'''
    def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
        warnings.warn(
            """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use CLIPImageProcessor instead.""", FutureWarning, )
        super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class lowerCAmelCase ( unittest.TestCase ):
    def analyze_directory ( self , directory , identifier = None , n_identifier = None , ignore_files = None , only_modules = True , ):
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' , file )
            if only_modules:
                module_identifier = file.split('.' )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F"""{module_identifier} is not a module.""" )
            else:
                result = doctest.testfile(str(Path('..' ) / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_files ( self ):
        directory = Path('src/transformers' )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def test_tokenization_files ( self ):
        directory = Path('src/transformers' )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )
    def test_configuration_files ( self ):
        directory = Path('src/transformers' )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )
    def test_remaining_files ( self ):
        directory = Path('src/transformers' )
        n_identifier = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifier )
    def test_doc_files ( self ):
        directory = Path('docs/source' )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 295 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader ( AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self, path_or_paths, split = None, features = None, cache_dir = None, keep_in_memory = False, streaming = False, num_proc = None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.data_files = path_or_paths if isinstance(path_or_paths, dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=self.data_files, features=features, **kwargs, )
    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
| 662 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor ( ProcessorMixin ):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 123 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_snake_case = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_snake_case = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_snake_case = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _info ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute ( self , predictions , references , min_len = 1 , max_len = 4 , ):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
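# A minimal single-reference, sentence-level sketch of the GLEU computation quoted
# in the description above (standard library only; `simple_gleu` is a hypothetical
# helper, not part of this metric): collect all n-grams of order min_len..max_len,
# count the clipped matches, and return min(recall, precision).
from collections import Counter
def simple_gleu(hypothesis, reference, min_len=1, max_len=4):
    def ngrams(tokens):
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )
    hyp_counts, ref_counts = ngrams(hypothesis), ngrams(reference)
    matches = sum((hyp_counts & ref_counts).values())  # clipped n-gram overlap
    precision = matches / max(sum(hyp_counts.values()), 1)
    recall = matches / max(sum(ref_counts.values()), 1)
    return min(precision, recall)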
| 382 |
def solution ( _lowerCAmelCase = 1000 ) -> int:
    """simple docstring"""
    prev_numerator , prev_denominator = 1, 1
    result = []
    for i in range(1 , _lowerCAmelCase + 1 ):
        # next expansion of sqrt(2): p/q -> (p + 2q)/(p + q)
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = """Hello, World!"""
a__ = """en_XX"""
def convert_xmod_checkpoint_to_pytorch ( xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
snake_case__ = Path("""data_bin""" )
snake_case__ = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
snake_case__ = xmod.model.encoder.sentence_encoder
snake_case__ = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
snake_case__ = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase )
snake_case__ = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case__ = xmod_sent_encoder.embed_tokens.weight
snake_case__ = xmod_sent_encoder.embed_positions.weight
snake_case__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
snake_case__ = xmod_sent_encoder.layernorm_embedding.weight
snake_case__ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case__ = model.roberta.encoder.layer[i]
snake_case__ = xmod_sent_encoder.layers[i]
# self attention
snake_case__ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
snake_case__ = xmod_layer.self_attn.q_proj.weight
snake_case__ = xmod_layer.self_attn.q_proj.bias
snake_case__ = xmod_layer.self_attn.k_proj.weight
snake_case__ = xmod_layer.self_attn.k_proj.bias
snake_case__ = xmod_layer.self_attn.v_proj.weight
snake_case__ = xmod_layer.self_attn.v_proj.bias
# self-attention output
snake_case__ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
snake_case__ = xmod_layer.self_attn.out_proj.weight
snake_case__ = xmod_layer.self_attn.out_proj.bias
snake_case__ = xmod_layer.self_attn_layer_norm.weight
snake_case__ = xmod_layer.self_attn_layer_norm.bias
# intermediate
snake_case__ = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
snake_case__ = xmod_layer.fca.weight
snake_case__ = xmod_layer.fca.bias
# output
snake_case__ = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
snake_case__ = xmod_layer.fca.weight
snake_case__ = xmod_layer.fca.bias
snake_case__ = xmod_layer.final_layer_norm.weight
snake_case__ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
snake_case__ = xmod_layer.adapter_layer_norm.weight
snake_case__ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
snake_case__ = bert_output.adapter_modules[lang_code]
snake_case__ = xmod_layer.adapter_modules[lang_code]
snake_case__ = from_adapter.fca.weight
snake_case__ = from_adapter.fca.bias
snake_case__ = from_adapter.fca.weight
snake_case__ = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
snake_case__ = xmod_sent_encoder.layer_norm.weight
snake_case__ = xmod_sent_encoder.layer_norm.bias
if classification_head:
snake_case__ = xmod.model.classification_heads["""mnli"""].dense.weight
snake_case__ = xmod.model.classification_heads["""mnli"""].dense.bias
snake_case__ = xmod.model.classification_heads["""mnli"""].out_proj.weight
snake_case__ = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
snake_case__ = xmod.model.encoder.lm_head.dense.weight
snake_case__ = xmod.model.encoder.lm_head.dense.bias
snake_case__ = xmod.model.encoder.lm_head.layer_norm.weight
snake_case__ = xmod.model.encoder.lm_head.layer_norm.bias
snake_case__ = xmod.model.encoder.lm_head.weight
snake_case__ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case__ = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
snake_case__ = model(_lowerCAmelCase )[0]
if classification_head:
snake_case__ = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) )
else:
snake_case__ = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
snake_case__ = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
snake_case__ = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
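# Example invocation (script filename and paths are hypothetical):
#   python convert_xmod_original_checkpoint.py \
#     --xmod_checkpoint_path ./xmod_base/model.pt \
#     --pytorch_dump_folder_path ./xmod-base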
| 654 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
    return _lowerCAmelCase.translate(_lowerCAmelCase.maketrans("""ATCG""" , """TAGC""" ) )
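# Example (illustrative): the complement of "ATCG" is "TAGC"; a strand containing
# characters outside {A, T, C, G} (e.g. "ATUCG") raises ValueError.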
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected ( file , sock ):
    """simple docstring"""
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    chunks = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(chunks )
    # ===== invoke =====
    send_file(filename="mytext.txt" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 59 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : dict = {}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self._trie
for char in text:
if char not in trie:
A : str = {}
A : str = trie[char]
A : Optional[int] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._trie
for char in prefix:
if char in trie:
A : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for c, v in d.items():
A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
result.extend(lowerCamelCase__ )
return tuple(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_:Any = Trie()
SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie ( _lowerCAmelCase ) -> tuple:
    """simple docstring"""
    suffixes = trie.find_word(_lowerCAmelCase )
    return tuple(_lowerCAmelCase + word for word in suffixes )
def main ( ) -> None:
    """simple docstring"""
    print(autocomplete_using_trie("""de""" ) )
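# Expected output of main() with the trie above (illustrative; each suffix ends
# with the END sentinel rendered as a trailing space):
#   ('depart ', 'detergent ', 'deer ', 'deal ')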
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 662 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig ( datasets.BuilderConfig ):
    """simple docstring"""
    features : Optional[datasets.Features] = None
class Pandas ( datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info ( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators ( self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table ( self , pa_table ):
        '''simple docstring'''
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables ( self , files ):
        '''simple docstring'''
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , """rb""" ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
            yield i, self._cast_table(pa_table )
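# Minimal round-trip sketch of the conversion performed in _generate_tables above
# (the file name is hypothetical):
#     import pandas as pd, pyarrow as pa
#     pd.DataFrame({"a": [1, 2]}).to_pickle("split.pkl")
#     table = pa.Table.from_pandas(pd.read_pickle("split.pkl"))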
| 379 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def load_and_quantize_model ( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_abit
A : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Tuple = load_in_abit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
            f'''The model device type is {model_device.type}. However, cuda is needed for quantization. '''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
def get_quantized_model_device_map ( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A : Optional[Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
def has_abit_bnb_layers ( model ) -> Optional[int]:
    """simple docstring"""
    for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit ):
            return True
    return False
def get_parameter_device ( parameter ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def quantize_and_offload_abit ( model , param , param_name , new_dtype , offload_folder , offload_index , fpaa_statistics ) -> List[Any]:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
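# Minimal usage sketch of the quantization entry point defined above (published in
# accelerate as `load_and_quantize_model`; the model class and checkpoint path are
# hypothetical):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     with init_empty_weights():
#         empty_model = MyModel(config)  # skeleton with meta-device weights
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#     quantized = load_and_quantize_model(
#         empty_model,
#         weights_location="path/to/checkpoint_folder",
#         bnb_quantization_config=bnb_config,
#         device_map="auto",
#     )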
| 662 | 0 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
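# Despite its name, the helper below returns cosine *similarity*: both embedding
# batches are L2-normalized (norms clipped at `eps` for numerical safety) before
# the matmul.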
def jax_cosine_distance ( emb_a , emb_b , eps=1e-12 ) -> Tuple:
    '''simple docstring'''
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T )
class FlaxStableDiffusionSafetyCheckerModule ( nn.Module ):
    """simple docstring"""
    config : CLIPConfig
    dtype : jnp.dtype = jnp.float32
    def setup ( self ):
        '''simple docstring'''
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config )
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype )
        self.concept_embeds = self.param('concept_embeds' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        self.special_care_embeds = self.param(
            'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        self.concept_embeds_weights = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (17,) )
        self.special_care_embeds_weights = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) )
    def __call__( self , clip_input ):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input )[1]
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3 )
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3 )
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker ( FlaxPreTrainedModel ):
    """simple docstring"""
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self , config , input_shape = None , seed = 0 , dtype = jnp.float32 , _do_init = True , **kwargs , ):
        '''simple docstring'''
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs )
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )
    def init_weights ( self , rng , input_shape , params = None ):
        '''simple docstring'''
        clip_input = jax.random.normal(rng , input_shape )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"""params""": params_rng, """dropout""": dropout_rng}
        random_params = self.module.init(rngs , clip_input )["""params"""]
        return random_params
    def __call__( self , clip_input , params = None , ):
        '''simple docstring'''
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )
        return self.module.apply(
            {'params': params or self.params} , jnp.array(clip_input , dtype=jnp.float32 ) , rngs={} , )
| 232 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
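

# Each *Command above follows one contract: a static `register_subcommand`
# attaches a subparser and binds a factory through `set_defaults(func=...)`,
# which `main` then calls to obtain a runnable service object. A minimal,
# self-contained sketch of that contract (the EchoCommand class and "echo"
# subcommand are illustrative, not part of transformers):
class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("echo", help="print a message and exit")
        sub.add_argument("message")
        sub.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self.message = message

    def run(self):
        print(self.message)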
| 662 | 0 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self) -> None:
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_012)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 1_012)
    def test_full_tokenizer(self) -> None:
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
    @slow
    def test_tokenization_base_easy_symbols(self) -> None:
        symbols = "Hello World!"
        original_tokenizer_encodings = [35_389, 6_672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
__lowercase = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
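

# The `fairseq_offset` arithmetic exercised above shifts raw SentencePiece ids
# to make room for fairseq-style special tokens. A standalone sketch of that
# mapping (the offset and unk id below are illustrative assumptions, not
# values read from the tokenizer):
def _demo_fairseq_offset(sp_ids, offset=12, unk_id=3):
    # Negative ids stand in for pieces the model does not know; map them to <unk>.
    return [i + offset if i >= 0 else unk_id for i in sp_ids]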
| 375 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
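

# The try/except blocks above all instantiate one guard pattern; import errors
# are deferred until a user actually touches a symbol whose backend is missing,
# instead of failing at import time. A runnable miniature of the same pattern
# (the flag and the "modeling_foo"/"FooModel" names are placeholders):
def _build_import_structure(backend_available: bool) -> dict:
    structure = {"configuration_foo": ["FooConfig"]}
    try:
        if not backend_available:
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass  # leave the heavy symbols out of the structure
    else:
        structure["modeling_foo"] = ["FooModel"]
    return structure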
| 662 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: "pa.Table") -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: "pa.Table") -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: "pa.Table") -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
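

# A standalone sketch of the recursive-tensorize idea above, independent of the
# Arrow plumbing (the helper name and its behavior are illustrative
# simplifications, not part of the datasets API):
def _demo_recursive_tensorize(data):
    import torch

    if isinstance(data, dict):
        return {k: _demo_recursive_tensorize(v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        items = [_demo_recursive_tensorize(v) for v in data]
        # stack homogeneous tensor lists, mirroring _consolidate
        if items and all(isinstance(t, torch.Tensor) and t.shape == items[0].shape for t in items):
            return torch.stack(items)
        return items
    return torch.tensor(data)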
| 671 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: take the largest denominations first (optimal for
    canonical coin systems such as Indian currency)."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations):
        # Take as many coins of this denomination as still fit
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the chosen coin to the "answers" array

    return answer
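

# Worked example (greedy is optimal for this canonical coin system):
# 987 = 500 + 100 + 100 + 100 + 100 + 50 + 20 + 10 + 5 + 2
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2_000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]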
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = []
SCREAMING_SNAKE_CASE_:Dict = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 662 | 0 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets with the same schema into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
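

# Usage sketch with toy data (row values are illustrative): with no
# probabilities given, rows alternate over the sources, and the default
# "first_exhausted" strategy stops when the smallest source runs out.
def _demo_interleave():
    from .arrow_dataset import Dataset

    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12]})
    mixed = interleave_datasets([d1, d2])
    assert mixed["a"] == [0, 10, 1, 11, 2, 12]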
| 321 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
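

# Hedged round-trip sketch (assumes the checkpoint ends up under the hub id
# "stas/tiny-wmt19-en-ru", as the header comment suggests):
def smoke_test_tiny_model(repo_id="stas/tiny-wmt19-en-ru"):
    tok = FSMTTokenizer.from_pretrained(repo_id)
    mdl = FSMTForConditionalGeneration.from_pretrained(repo_id)
    out = mdl(**tok(["Making tiny model"], return_tensors="pt"))
    print("logits shape:", tuple(out.logits.shape))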
| 662 | 0 |
import argparse
import os
import re
a_ = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a_ = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
a_ = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
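

# Toy illustration of the sort key used above: mapping entries are ordered by
# the first quoted identifier that _re_identifier extracts from each block.
def _demo_sort_blocks():
    blocks = ['    ("bert", "BertModel"),', '    ("albert", "AlbertModel"),']
    ordered = sorted(blocks, key=lambda b: _re_identifier.search(b).groups()[0])
    assert ordered[0].strip().startswith('("albert"')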
| 175 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate (fc1)
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output (fc2)
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code in xmod_layer.adapter_modules.keys():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
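

# A small generic helper in the same spirit as the parity check above
# (illustrative addition, not part of the original script):
def max_abs_diff(t1, t2):
    """Worst absolute deviation between two tensors of the same shape."""
    return torch.max(torch.abs(t1 - t2)).item()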
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 295 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ["pixel_values", "input_ids", "attention_mask"]
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 662 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
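

# Toy illustration of the renaming machinery above (the keys are real examples
# from create_rename_keys; the plain dict stands in for a model state dict):
def _demo_rename_key():
    sd = {"cls_token": 1, "pos_embed": 2}
    pairs = [("cls_token", "vit.embeddings.cls_token"), ("pos_embed", "vit.embeddings.position_embeddings")]
    for src, dest in pairs:
        rename_key(sd, src, dest)
    assert "vit.embeddings.cls_token" in sd and "cls_token" not in sd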
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights to our ViT hybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1_000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 123 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
    model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
    latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
    encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
    sample = model.apply(
        {"params": params},
        latents,
        jnp.array(timestep, dtype=jnp.int32),
        encoder_hidden_states=encoder_hidden_states,
    ).sample
    assert sample.shape == latents.shape
    output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
    expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
    # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
    assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
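# --- Illustrative sketch, not part of the original test file ---
# The tests above compare a small slice of the UNet output to hard-coded
# reference values. The same pattern on a plain array (reference numbers here
# are constructed for the demo, not taken from any model):
def _demo_slice_check() -> None:
    sample = jnp.arange(16, dtype=jnp.float32).reshape(1, 4, 2, 2) / 10.0
    output_slice = sample[-1, -2:, -2:, :].flatten()[:4]
    expected_slice = jnp.array([0.8, 0.9, 1.0, 1.1], dtype=jnp.float32)
    assert jnp.allclose(output_slice, expected_slice, atol=1e-2)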
| 662 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
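# --- Illustrative sketch, not part of the original file ---
# Minimal usage of the config defined above; unset fields fall back to the
# defaults in __init__, and `attribute_map` lets the generic name resolve too.
if __name__ == "__main__":
    demo_config = CTRLConfig(n_layer=2, n_head=4)
    print(demo_config.n_embd)       # 1280, the default
    print(demo_config.hidden_size)  # 1280 as well, resolved through attribute_map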
| 382 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Check whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient v* A v / (v* v) of a Hermitian matrix ``a``."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
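# --- Illustrative sketch, not part of the original file ---
# For a Hermitian matrix the Rayleigh quotient of any nonzero vector lies
# between the smallest and largest eigenvalue; a quick numerical check:
def rayleigh_bounds_demo() -> None:
    a = np.array([[2.0, 1.0], [1.0, 3.0]])
    v = np.array([[1.0], [1.0]])
    r = rayleigh_quotient(a, v).item()  # 3.5 for this input
    eigenvalues = np.linalg.eigvalsh(a)
    assert eigenvalues[0] <= r <= eigenvalues[-1]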
| 662 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
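# --- Illustrative sketch, not part of the original file ---
# Hypothetical usage of the tool above; it assumes the `transformers` tools
# runtime, network access for the checkpoint, and Pillow. The file name is a
# placeholder, so this is left commented out:
#
# from PIL import Image
#
# tool = DocumentQuestionAnsweringTool()
# tool.setup()  # loads the processor and the VisionEncoderDecoder checkpoint
# document = Image.open("invoice.png")  # placeholder path
# print(tool(document=document, question="What is the invoice total?"))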
| 654 |
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Decompose a square matrix into lower- and upper-triangular factors (Doolittle method)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
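# --- Illustrative sketch, not part of the original file ---
# Quick self-check: multiplying the two factors reproduces the input matrix.
def _lu_self_check() -> None:
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)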
| 662 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    """Quantize a model's linear layers with bitsandbytes and optionally dispatch/offload it."""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n "
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace all `torch.nn.Linear` modules with `bnb.nn.Linear8bitLt` or `bnb.nn.Linear4bit` modules."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Private recursive replacement; returns (model, has_been_replaced)."""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    """Find modules (e.g. lm_head and tied weights) that should stay in their original dtype."""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check whether the model already contains `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """Quantize a parameter to 8-bit and offload it (plus its statistics) to disk."""
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
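# --- Illustrative sketch, not part of the original file ---
# Hypothetical end-to-end use of `load_and_quantize_model`; it assumes a CUDA
# GPU, `bitsandbytes`, and a local checkpoint folder (the path and the model
# class are placeholders), so it is left commented out:
#
# from accelerate import init_empty_weights
# from my_package import MyModel  # placeholder model class
#
# bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# with init_empty_weights():
#     empty_model = MyModel()
# quantized_model = load_and_quantize_model(
#     empty_model,
#     bnb_quantization_config=bnb_config,
#     weights_location="path/to/checkpoint_folder",  # placeholder
#     device_map="auto",
# )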
| 59 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    """Constructs a DPT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image,
        size,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
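# --- Illustrative sketch, not part of the original file ---
# Minimal usage of the processor above on a random image; no model download
# is needed, only numpy and Pillow:
if __name__ == "__main__":
    demo_processor = DPTImageProcessor()
    demo_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    demo_batch = demo_processor(images=demo_image, return_tensors=None)
    print(demo_batch["pixel_values"][0].shape)  # (3, 384, 384): resized, rescaled, CHW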
| 662 | 0 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
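# --- Illustrative sketch, not part of the original file ---
# What `flatten_yaml_as_dict` does, shown on a plain nested dict so nested
# YAML options can later be read back with a single dotted key:
def _flatten_demo() -> None:
    nested = {"model": {"classification": {"name": "mobilevit_v2", "n_classes": 1000}}}
    flat = {}

    def walk(d, prefix=""):
        for key, value in d.items():
            dotted = f"{prefix}.{key}" if prefix else key
            if isinstance(value, dict):
                walk(value, dotted)
            else:
                flat[dotted] = value

    walk(nested)
    assert flat == {"model.classification.name": "mobilevit_v2", "model.classification.n_classes": 1000}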
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTVaConfig()
SCREAMING_SNAKE_CASE : int = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
SCREAMING_SNAKE_CASE : Optional[int] = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
SCREAMING_SNAKE_CASE : List[Any] = 3_84
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = 2_56
SCREAMING_SNAKE_CASE : Any = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
SCREAMING_SNAKE_CASE : List[Any] = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
SCREAMING_SNAKE_CASE : Optional[Any] = 3_84
else:
SCREAMING_SNAKE_CASE : List[Any] = 2_56
SCREAMING_SNAKE_CASE : Optional[int] = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 1_51
SCREAMING_SNAKE_CASE : Optional[Any] = 5_12
SCREAMING_SNAKE_CASE : List[Any] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[Any] = True
elif task_name.startswith("""voc_""" ):
SCREAMING_SNAKE_CASE : Optional[Any] = 21
SCREAMING_SNAKE_CASE : Union[str, Any] = 5_12
SCREAMING_SNAKE_CASE : Union[str, Any] = """pascal-voc-id2label.json"""
SCREAMING_SNAKE_CASE : int = True
# orig_config
SCREAMING_SNAKE_CASE : Union[str, Any] = load_orig_config_file(_lowerCAmelCase )
assert getattr(_lowerCAmelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCAmelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(_lowerCAmelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCAmelCase , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
SCREAMING_SNAKE_CASE : Dict = getattr(_lowerCAmelCase , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
SCREAMING_SNAKE_CASE : Tuple = getattr(_lowerCAmelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
SCREAMING_SNAKE_CASE : str = getattr(_lowerCAmelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
SCREAMING_SNAKE_CASE : List[Any] = getattr(_lowerCAmelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
SCREAMING_SNAKE_CASE : Union[str, Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : str = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[int] = idalabel
SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in idalabel.items()}
return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def __A ( lowerCamelCase_ , lowerCamelCase_=False ):
"""simple docstring"""
if base_model:
SCREAMING_SNAKE_CASE : Any = """"""
else:
SCREAMING_SNAKE_CASE : Dict = """mobilevitv2."""
SCREAMING_SNAKE_CASE : List[Any] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
SCREAMING_SNAKE_CASE : int = k[8:]
else:
SCREAMING_SNAKE_CASE : Optional[int] = k
if ".block." in k:
SCREAMING_SNAKE_CASE : Union[str, Any] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
SCREAMING_SNAKE_CASE : List[Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
SCREAMING_SNAKE_CASE : Optional[int] = k_new.replace("""conv_1.""" , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
SCREAMING_SNAKE_CASE : Optional[int] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
SCREAMING_SNAKE_CASE : int = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
SCREAMING_SNAKE_CASE : Any = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
SCREAMING_SNAKE_CASE : Any = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
SCREAMING_SNAKE_CASE : List[str] = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
SCREAMING_SNAKE_CASE : int = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
SCREAMING_SNAKE_CASE : List[str] = [0, 1]
elif i == 4:
SCREAMING_SNAKE_CASE : Any = [0, 1, 2, 3]
elif i == 5:
SCREAMING_SNAKE_CASE : List[Any] = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
SCREAMING_SNAKE_CASE : Union[str, Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
SCREAMING_SNAKE_CASE : Optional[int] = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
SCREAMING_SNAKE_CASE : List[Any] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
SCREAMING_SNAKE_CASE : Tuple = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
SCREAMING_SNAKE_CASE : int = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
SCREAMING_SNAKE_CASE : Dict = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
SCREAMING_SNAKE_CASE : Optional[int] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
SCREAMING_SNAKE_CASE : Tuple = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
SCREAMING_SNAKE_CASE : Optional[Any] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
SCREAMING_SNAKE_CASE : Tuple = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = get_mobilevitva_config(_lowerCAmelCase , _lowerCAmelCase )
# load original state_dict
SCREAMING_SNAKE_CASE : str = torch.load(_lowerCAmelCase , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
SCREAMING_SNAKE_CASE : int = MobileViTVaForSemanticSegmentation(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE : int = False
else:
SCREAMING_SNAKE_CASE : int = MobileViTVaForImageClassification(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE : int = False
# remove and rename some keys of load the original model
SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint
remove_unused_keys(_lowerCAmelCase )
SCREAMING_SNAKE_CASE : Any = create_rename_keys(_lowerCAmelCase , base_model=_lowerCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load modified state_dict
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
SCREAMING_SNAKE_CASE : str = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE : Any = model(**_lowerCAmelCase )
# verify classification model
if task_name.startswith("""imagenet""" ):
SCREAMING_SNAKE_CASE : int = outputs.logits
SCREAMING_SNAKE_CASE : str = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1E-4 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
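# --- Illustrative sketch, not part of the original file ---
# The conversion above boils down to renaming checkpoint keys with a list of
# (old, new) pairs; the same pattern on a plain dict:
def _rename_demo() -> None:
    state_dict = {"conv_1.block.weight": 1, "layer_1.0.weight": 2}
    rename_pairs = [("conv_1.block.weight", "conv_stem.weight")]  # constructed example
    for old, new in rename_pairs:
        state_dict[new] = state_dict.pop(old)
    assert "conv_stem.weight" in state_dict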
| 379 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
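# --- Illustrative sketch, not part of the original file ---
# The two arrays above implement Kadane's algorithm: `sum_value[i]` is the best
# sum of a subarray ending at i, `rear[i]` the best sum seen so far.
def _sub_array_demo() -> None:
    assert SubArray("1,-2,3,4").solve_sub_array() == 7  # the subarray [3, 4]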
| 662 | 0 |
"""simple docstring"""
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x = r1 (mod n1), x = r2 (mod n2) for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
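# --- Illustrative sketch, not part of the original file ---
# Worked example: the unique x (mod 35) with x = 1 (mod 5) and x = 3 (mod 7).
def _crt_demo() -> None:
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31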
| 232 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
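# --- Illustrative sketch, not part of the original file ---
# Minimal usage, assuming `transformers` is installed:
if __name__ == "__main__":
    demo_config = BitConfig(out_features=["stage1", "stage4"])
    print(demo_config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(demo_config.out_features)  # ['stage1', 'stage4']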
| 662 | 0 |
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid (1 = walkable); returns (distance, path)."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
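# --- Illustrative sketch, not part of the original file ---
# Manhattan-only search on a 3x3 grid where every cell is walkable:
def _dijkstra_demo() -> None:
    grid = np.ones((3, 3), dtype=int)
    dist, path = dijkstra(grid, (0, 0), (2, 2), False)
    assert dist == 4
    assert path[0] == (0, 0) and path[-1] == (2, 2)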
| 375 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        config.is_decoder = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
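# --- Illustrative sketch, not part of the original file ---
# A tiny randomly initialized encoder, exercised the same way the tester above
# does; it assumes torch and transformers are installed (no download needed):
if __name__ == "__main__":
    demo_config = BertGenerationConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    demo_model = BertGenerationEncoder(demo_config).eval()
    demo_ids = torch.randint(0, 99, (1, 7))
    with torch.no_grad():
        print(demo_model(demo_ids).last_hidden_state.shape)  # torch.Size([1, 7, 32])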
| 662 | 0 |
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class __lowercase :
"""simple docstring"""
def __init__( self : List[str]):
SCREAMING_SNAKE_CASE_: int = None # First node in list
SCREAMING_SNAKE_CASE_: int = None # Last node in list
def __str__( self : int):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.head
SCREAMING_SNAKE_CASE_: Dict = []
while current is not None:
nodes.append(current.get_data())
SCREAMING_SNAKE_CASE_: Tuple = current.get_next()
return " ".join(str(lowerCamelCase__) for node in nodes)
def __contains__( self : List[str] , lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: str = self.head
while current:
if current.get_data() == value:
return True
SCREAMING_SNAKE_CASE_: Dict = current.get_next()
return False
def __iter__( self : Optional[int]):
return LinkedListIterator(self.head)
def _SCREAMING_SNAKE_CASE ( self : Any):
if self.head:
return self.head.get_data()
return None
def _SCREAMING_SNAKE_CASE ( self : Any):
if self.tail:
return self.tail.get_data()
return None
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : int):
if self.head is None:
SCREAMING_SNAKE_CASE_: int = node
SCREAMING_SNAKE_CASE_: int = node
else:
self.insert_before_node(self.head , lowerCamelCase__)
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Optional[Any]):
if self.head is None:
self.set_head(lowerCamelCase__)
else:
self.insert_after_node(self.tail , lowerCamelCase__)
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[str]):
SCREAMING_SNAKE_CASE_: List[Any] = Node(lowerCamelCase__)
if self.head is None:
self.set_head(lowerCamelCase__)
else:
self.set_tail(lowerCamelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[Any] = node
SCREAMING_SNAKE_CASE_: str = node.previous
if node.get_previous() is None:
SCREAMING_SNAKE_CASE_: List[str] = node_to_insert
else:
SCREAMING_SNAKE_CASE_: int = node_to_insert
SCREAMING_SNAKE_CASE_: List[str] = node_to_insert
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Any = node
SCREAMING_SNAKE_CASE_: Any = node.next
if node.get_next() is None:
SCREAMING_SNAKE_CASE_: str = node_to_insert
else:
SCREAMING_SNAKE_CASE_: List[Any] = node_to_insert
SCREAMING_SNAKE_CASE_: Any = node_to_insert
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = 1
SCREAMING_SNAKE_CASE_: Optional[Any] = Node(lowerCamelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCamelCase__ , lowerCamelCase__)
return
current_position += 1
SCREAMING_SNAKE_CASE_: str = node.next
self.insert_after_node(self.tail , lowerCamelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = self.head
while node:
if node.get_data() == item:
return node
SCREAMING_SNAKE_CASE_: List[str] = node.get_next()
raise Exception("Node not found")
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : List[str]):
if (node := self.get_node(lowerCamelCase__)) is not None:
if node == self.head:
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.head.get_next()
if node == self.tail:
SCREAMING_SNAKE_CASE_: Optional[Any] = self.tail.get_previous()
self.remove_node_pointers(lowerCamelCase__)
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Dict):
if node.get_next():
SCREAMING_SNAKE_CASE_: Dict = node.previous
if node.get_previous():
SCREAMING_SNAKE_CASE_: List[str] = node.next
SCREAMING_SNAKE_CASE_: Dict = None
SCREAMING_SNAKE_CASE_: List[str] = None
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return self.head is None
def A_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
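# Every method of the list class above shares one obfuscated name, so it cannot
# be exercised directly. As a hedged, self-contained sketch (assumed names, not
# the original API), this is the pointer rewiring the insert-before logic performs:
class _SketchNode:
    def __init__(self, data):
        self.data, self.previous, self.next = data, None, None


def _insert_before(node, new):      # mirrors insert_before_node above
    new.next = node                 # new points forward to node
    new.previous = node.previous    # and backward to node's old predecessor
    if node.previous is not None:
        node.previous.next = new    # the old predecessor now points to new
    node.previous = new


_a, _b, _c = _SketchNode(1), _SketchNode(3), _SketchNode(2)
_a.next, _b.previous = _b, _a
_insert_before(_b, _c)              # list becomes 1 <-> 2 <-> 3
assert _a.next is _c and _c.next is _b and _b.previous is _c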
| 671 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
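# A worked illustration of the crop_pct rule in resize above (hedged sketch with
# assumed values): below 384 pixels the image is first resized so its short side
# equals shortest_edge / crop_pct and only then center-cropped to shortest_edge.
shortest_edge = 224
crop_pct = 224 / 256                         # the default set in __init__
resize_edge = int(shortest_edge / crop_pct)  # resize the short side to 256 ...
assert resize_edge == 256                    # ... then center-crop to 224 x 224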
| 662 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCAmelCase = 42
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("DownEncoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_=True , ):
super().__init__()
__lowercase = layers_per_block
__lowercase = torch.nn.Convad(
lowerCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__lowercase = None
__lowercase = nn.ModuleList([] )
# down
__lowercase = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase__ ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowerCamelCase__ ) - 1
__lowercase = get_down_block(
lowerCamelCase__ , num_layers=self.layers_per_block , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase__ , resnet_groups=lowerCamelCase__ , attention_head_dim=lowerCamelCase__ , temb_channels=lowerCamelCase__ , )
self.down_blocks.append(lowerCamelCase__ )
# mid
__lowercase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase__ , temb_channels=lowerCamelCase__ , )
# out
__lowercase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase__ , eps=1E-6 )
__lowercase = nn.SiLU()
__lowercase = 2 * out_channels if double_z else out_channels
__lowercase = nn.Convad(block_out_channels[-1] , lowerCamelCase__ , 3 , padding=1 )
__lowercase = False
def snake_case__ ( self , lowerCAmelCase_ ):
__lowercase = x
__lowercase = self.conv_in(lowerCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCamelCase__ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
__lowercase = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase__ ) , lowerCamelCase__ , use_reentrant=lowerCamelCase__ )
# middle
__lowercase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase__ , use_reentrant=lowerCamelCase__ )
else:
for down_block in self.down_blocks:
__lowercase = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase__ ) , lowerCamelCase__ )
# middle
__lowercase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase__ )
else:
# down
for down_block in self.down_blocks:
__lowercase = down_block(lowerCamelCase__ )
# middle
__lowercase = self.mid_block(lowerCamelCase__ )
# post-process
__lowercase = self.conv_norm_out(lowerCamelCase__ )
__lowercase = self.conv_act(lowerCamelCase__ )
__lowercase = self.conv_out(lowerCamelCase__ )
return sample
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("UpDecoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_="group" , ):
super().__init__()
__lowercase = layers_per_block
__lowercase = nn.Convad(
lowerCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__lowercase = None
__lowercase = nn.ModuleList([] )
__lowercase = in_channels if norm_type == """spatial""" else None
# mid
__lowercase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase__ , temb_channels=lowerCamelCase__ , )
# up
__lowercase = list(reversed(lowerCamelCase__ ) )
__lowercase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase__ ):
__lowercase = output_channel
__lowercase = reversed_block_out_channels[i]
__lowercase = i == len(lowerCamelCase__ ) - 1
__lowercase = get_up_block(
lowerCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , prev_output_channel=lowerCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCamelCase__ , resnet_groups=lowerCamelCase__ , attention_head_dim=lowerCamelCase__ , temb_channels=lowerCamelCase__ , resnet_time_scale_shift=lowerCamelCase__ , )
self.up_blocks.append(lowerCamelCase__ )
__lowercase = output_channel
# out
if norm_type == "spatial":
__lowercase = SpatialNorm(block_out_channels[0] , lowerCamelCase__ )
else:
__lowercase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase__ , eps=1E-6 )
__lowercase = nn.SiLU()
__lowercase = nn.Convad(block_out_channels[0] , lowerCamelCase__ , 3 , padding=1 )
__lowercase = False
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_=None ):
__lowercase = z
__lowercase = self.conv_in(lowerCamelCase__ )
__lowercase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCamelCase__ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
__lowercase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase__ , lowerCamelCase__ , use_reentrant=lowerCamelCase__ )
__lowercase = sample.to(lowerCamelCase__ )
# up
for up_block in self.up_blocks:
__lowercase = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ , use_reentrant=lowerCamelCase__ )
else:
# middle
__lowercase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase__ , lowerCamelCase__ )
__lowercase = sample.to(lowerCamelCase__ )
# up
for up_block in self.up_blocks:
__lowercase = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ )
else:
# middle
__lowercase = self.mid_block(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = sample.to(lowerCamelCase__ )
# up
for up_block in self.up_blocks:
__lowercase = up_block(lowerCamelCase__ , lowerCamelCase__ )
# post-process
if latent_embeds is None:
__lowercase = self.conv_norm_out(lowerCamelCase__ )
else:
__lowercase = self.conv_norm_out(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = self.conv_act(lowerCamelCase__ )
__lowercase = self.conv_out(lowerCamelCase__ )
return sample
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="random" , lowerCAmelCase_=False , lowerCAmelCase_=True ):
super().__init__()
__lowercase = n_e
__lowercase = vq_embed_dim
__lowercase = beta
__lowercase = legacy
__lowercase = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__lowercase = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
__lowercase = self.used.shape[0]
__lowercase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowercase = self.re_embed
__lowercase = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
__lowercase = n_e
__lowercase = sane_index_shape
def snake_case__ ( self , lowerCAmelCase_ ):
__lowercase = inds.shape
assert len(lowerCamelCase__ ) > 1
__lowercase = inds.reshape(ishape[0] , -1 )
__lowercase = self.used.to(lowerCamelCase__ )
__lowercase = (inds[:, :, None] == used[None, None, ...]).long()
__lowercase = match.argmax(-1 )
__lowercase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowercase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__lowercase = self.unknown_index
return new.reshape(lowerCamelCase__ )
def snake_case__ ( self , lowerCAmelCase_ ):
__lowercase = inds.shape
assert len(lowerCamelCase__ ) > 1
__lowercase = inds.reshape(ishape[0] , -1 )
__lowercase = self.used.to(lowerCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
__lowercase = 0 # simply set to zero
__lowercase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase__ )
return back.reshape(lowerCamelCase__ )
def snake_case__ ( self , lowerCAmelCase_ ):
# reshape z -> (batch, height, width, channel) and flatten
__lowercase = z.permute(0 , 2 , 3 , 1 ).contiguous()
__lowercase = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowercase = torch.argmin(torch.cdist(lowerCamelCase__ , self.embedding.weight ) , dim=1 )
__lowercase = self.embedding(lowerCamelCase__ ).view(z.shape )
__lowercase = None
__lowercase = None
# compute loss for embedding
if not self.legacy:
__lowercase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowercase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowercase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowercase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__lowercase = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__lowercase = self.remap_to_used(lowerCamelCase__ )
__lowercase = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__lowercase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
__lowercase = indices.reshape(shape[0] , -1 ) # add batch axis
__lowercase = self.unmap_to_all(lowerCamelCase__ )
__lowercase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowercase = self.embedding(lowerCamelCase__ )
if shape is not None:
__lowercase = z_q.view(lowerCamelCase__ )
# reshape back to match original input shape
__lowercase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=False ):
__lowercase = parameters
__lowercase = torch.chunk(lowerCamelCase__ , 2 , dim=1 )
__lowercase = torch.clamp(self.logvar , -30.0 , 20.0 )
__lowercase = deterministic
__lowercase = torch.exp(0.5 * self.logvar )
__lowercase = torch.exp(self.logvar )
if self.deterministic:
__lowercase = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case__ ( self , lowerCAmelCase_ = None ):
# make sure sample is on the same device as the parameters and has same dtype
__lowercase = randn_tensor(
self.mean.shape , generator=lowerCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
__lowercase = self.mean + self.std * sample
return x
def snake_case__ ( self , lowerCAmelCase_=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
__lowercase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase__ )
def snake_case__ ( self ):
return self.mean
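# The codebook lookup in the quantizer class above (upstream name: VectorQuantizer)
# reduces to a nearest-neighbour search; a hedged, standalone sketch with assumed
# shapes (not the module's API):
import torch

codebook = torch.randn(8, 4)   # n_e = 8 code vectors of dimension vq_embed_dim = 4
z_flat = torch.randn(3, 4)     # three flattened latents
indices = torch.argmin(torch.cdist(z_flat, codebook), dim=1)
z_q = codebook[indices]        # each latent snaps to its closest code vector
assert z_q.shape == z_flat.shape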
| 321 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ):
    """simple docstring"""
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
        gamma_2 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""model"""]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
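# A hedged, toy-scale sketch of the fused-qkv split that read_in_q_k_v performs
# above (assumed hidden size; standalone illustration):
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # stacked q, k, v rows
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : hidden_size * 2, :]
v = in_proj_weight[-hidden_size:, :]
assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)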
| 662 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = 42
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
@register_to_config
def __init__( self , __UpperCAmelCase = 65536 , __UpperCAmelCase = None , __UpperCAmelCase = 2 , __UpperCAmelCase = 2 , __UpperCAmelCase = 0 , __UpperCAmelCase = "fourier" , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = 0.0 , __UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __UpperCAmelCase = "UNetMidBlock1D" , __UpperCAmelCase = None , __UpperCAmelCase = (32, 32, 64) , __UpperCAmelCase = None , __UpperCAmelCase = 8 , __UpperCAmelCase = 1 , __UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
__lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase__ , log=lowerCamelCase__ , flip_sin_to_cos=lowerCamelCase__ )
__lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase__ , downscale_freq_shift=lowerCamelCase__ )
__lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
__lowerCamelCase = block_out_channels[0] * 4
__lowerCamelCase = TimestepEmbedding(
in_channels=lowerCamelCase__ , time_embed_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ , out_dim=block_out_channels[0] , )
__lowerCamelCase = nn.ModuleList([] )
__lowerCamelCase = None
__lowerCamelCase = nn.ModuleList([] )
__lowerCamelCase = None
# down
__lowerCamelCase = in_channels
for i, down_block_type in enumerate(lowerCamelCase__ ):
__lowerCamelCase = output_channel
__lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__lowerCamelCase = i == len(lowerCamelCase__ ) - 1
__lowerCamelCase = get_down_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase__ )
# mid
__lowerCamelCase = get_mid_block(
lowerCamelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase__ , add_downsample=lowerCamelCase__ , )
# up
__lowerCamelCase = list(reversed(lowerCamelCase__ ) )
__lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
__lowerCamelCase = out_channels
else:
__lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase__ ):
__lowerCamelCase = output_channel
__lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase__ ) - 1 else final_upsample_channels
)
__lowerCamelCase = i == len(lowerCamelCase__ ) - 1
__lowerCamelCase = get_up_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase__ )
__lowerCamelCase = output_channel
# out
__lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
__lowerCamelCase = get_out_block(
out_block_type=lowerCamelCase__ , num_groups_out=lowerCamelCase__ , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase__ , act_fn=lowerCamelCase__ , fc_dim=block_out_channels[-1] // 4 , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , ):
'''simple docstring'''
__lowerCamelCase = timestep
if not torch.is_tensor(lowerCamelCase__ ):
__lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
__lowerCamelCase = timesteps[None].to(sample.device )
__lowerCamelCase = self.time_proj(lowerCamelCase__ )
if self.config.use_timestep_embedding:
__lowerCamelCase = self.time_mlp(lowerCamelCase__ )
else:
__lowerCamelCase = timestep_embed[..., None]
__lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
__lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
__lowerCamelCase = ()
for downsample_block in self.down_blocks:
__lowerCamelCase = downsample_block(hidden_states=lowerCamelCase__ , temb=lowerCamelCase__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__lowerCamelCase = self.mid_block(lowerCamelCase__ , lowerCamelCase__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
__lowerCamelCase = down_block_res_samples[-1:]
__lowerCamelCase = down_block_res_samples[:-1]
__lowerCamelCase = upsample_block(lowerCamelCase__ , res_hidden_states_tuple=lowerCamelCase__ , temb=lowerCamelCase__ )
# 5. post-process
if self.out_block:
__lowerCamelCase = self.out_block(lowerCamelCase__ , lowerCamelCase__ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase__ )
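# The skip-connection bookkeeping in the forward pass above pushes each down
# block's residuals onto a tuple and pops from the end on the way up; a hedged
# toy illustration of that last-in-first-out pairing:
down_block_res_samples = ()
for res in ("d0", "d1", "d2"):                      # stand-ins for down-block outputs
    down_block_res_samples += (res,)
for _ in range(3):                                  # each up block takes the newest residual
    res_samples = down_block_res_samples[-1:]
    down_block_res_samples = down_block_res_samples[:-1]
assert res_samples == ("d0",) and down_block_res_samples == ()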
| 175 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("""DataClass""", Any)
DataClassType = NewType("""DataClassType""", Any)


def string_to_bool( v ) -> bool:
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )


def make_choice_type_function( choices ) -> Callable[[str], Any]:
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg , arg )


# upstream name in transformers (no in-module call sites pin it here)
def HfArg( *, aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["""aliases"""] = aliases
    if help is not None:
        metadata["""help"""] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = 42
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Dict:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowerCamelCase__ : Tuple = ArgumentDefaultsHelpFormatter
super().__init__(**lowerCamelCase__ )
if dataclasses.is_dataclass(lowerCamelCase__ ):
lowerCamelCase__ : int = [dataclass_types]
lowerCamelCase__ : Any = list(lowerCamelCase__ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowerCamelCase__ )
@staticmethod
def A_ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ) -> str:
lowerCamelCase__ : Union[str, Any] = F"""--{field.name}"""
lowerCamelCase__ : str = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowerCamelCase__ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
lowerCamelCase__ : Optional[int] = kwargs.pop('aliases' , [] )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase__ : Optional[Any] = [aliases]
lowerCamelCase__ : List[Any] = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(lowerCamelCase__ , 'UnionType' ) and isinstance(lowerCamelCase__ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowerCamelCase__ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F""" Problem encountered in field \'{field.name}\'.""" )
if type(lowerCamelCase__ ) not in field.type.__args__:
# filter `str` in Union
lowerCamelCase__ : Optional[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCamelCase__ : List[str] = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCamelCase__ : List[Any] = (
field.type.__args__[0] if isinstance(lowerCamelCase__ , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCamelCase__ : Optional[Any] = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCamelCase__ : Tuple = {}
if origin_type is Literal or (isinstance(field.type , lowerCamelCase__ ) and issubclass(field.type , lowerCamelCase__ )):
if origin_type is Literal:
lowerCamelCase__ : int = field.type.__args__
else:
lowerCamelCase__ : Union[str, Any] = [x.value for x in field.type]
lowerCamelCase__ : Optional[Any] = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
lowerCamelCase__ : Optional[Any] = field.default
else:
lowerCamelCase__ : Optional[int] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCamelCase__ : Optional[Any] = copy(lowerCamelCase__ )
# Hack because type=bool in argparse does not behave as we want.
lowerCamelCase__ : Optional[int] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCamelCase__ : Any = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCamelCase__ : Tuple = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCamelCase__ : Union[str, Any] = """?"""
# This is the value that will get picked if we do --field_name (without value)
lowerCamelCase__ : Optional[int] = True
elif isclass(lowerCamelCase__ ) and issubclass(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase__ : Any = field.type.__args__[0]
lowerCamelCase__ : List[str] = """+"""
if field.default_factory is not dataclasses.MISSING:
lowerCamelCase__ : str = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCamelCase__ : Optional[int] = True
else:
lowerCamelCase__ : Tuple = field.type
if field.default is not dataclasses.MISSING:
lowerCamelCase__ : Tuple = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCamelCase__ : str = field.default_factory()
else:
lowerCamelCase__ : Tuple = True
parser.add_argument(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCamelCase__ : Union[str, Any] = False
parser.add_argument(F"""--no_{field.name}""" , action='store_false' , dest=field.name , **lowerCamelCase__ )
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[Any] ) -> List[Any]:
if hasattr(lowerCamelCase__ , '_argument_group_name' ):
lowerCamelCase__ : Optional[int] = self.add_argument_group(dtype._argument_group_name )
else:
lowerCamelCase__ : Optional[int] = self
try:
lowerCamelCase__ : Dict[str, type] = get_type_hints(lowerCamelCase__ )
except NameError:
raise RuntimeError(
F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCamelCase__ ):
lowerCamelCase__ : int = """.""".join(map(lowerCamelCase__ , sys.version_info[:3] ) )
raise RuntimeError(
F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(lowerCamelCase__ ):
if not field.init:
continue
lowerCamelCase__ : List[str] = type_hints[field.name]
self._parse_dataclass_field(lowerCamelCase__ , lowerCamelCase__ )
def A_ ( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , ) -> Optional[int]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCamelCase__ : Optional[int] = []
if args_filename:
args_files.append(Path(lowerCamelCase__ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCamelCase__ : Union[str, Any] = ArgumentParser()
args_file_parser.add_argument(lowerCamelCase__ , type=lowerCamelCase__ , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCamelCase__ : List[Any] = args_file_parser.parse_known_args(args=lowerCamelCase__ )
lowerCamelCase__ : Dict = vars(lowerCamelCase__ ).get(args_file_flag.lstrip('-' ) , lowerCamelCase__ )
if cmd_args_file_paths:
args_files.extend([Path(lowerCamelCase__ ) for p in cmd_args_file_paths] )
lowerCamelCase__ : Any = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCamelCase__ : Dict = file_args + args if args is not None else file_args + sys.argv[1:]
lowerCamelCase__ : Tuple = self.parse_known_args(args=lowerCamelCase__ )
lowerCamelCase__ : List[Any] = []
for dtype in self.dataclass_types:
lowerCamelCase__ : Union[str, Any] = {f.name for f in dataclasses.fields(lowerCamelCase__ ) if f.init}
lowerCamelCase__ : Any = {k: v for k, v in vars(lowerCamelCase__ ).items() if k in keys}
for k in keys:
delattr(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase__ : str = dtype(**lowerCamelCase__ )
outputs.append(lowerCamelCase__ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowerCamelCase__ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any = False ) -> Optional[int]:
lowerCamelCase__ : str = set(args.keys() )
lowerCamelCase__ : Tuple = []
for dtype in self.dataclass_types:
lowerCamelCase__ : List[str] = {f.name for f in dataclasses.fields(lowerCamelCase__ ) if f.init}
lowerCamelCase__ : Optional[int] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCamelCase__ : Optional[int] = dtype(**lowerCamelCase__ )
outputs.append(lowerCamelCase__ )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(lowerCamelCase__ )}""" )
return tuple(lowerCamelCase__ )
def A_ ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str = False ) -> Optional[Any]:
with open(Path(lowerCamelCase__ ) , encoding='utf-8' ) as open_json_file:
lowerCamelCase__ : Optional[Any] = json.loads(open_json_file.read() )
lowerCamelCase__ : Tuple = self.parse_dict(lowerCamelCase__ , allow_extra_keys=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def A_ ( self : str , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] = False ) -> Dict:
lowerCamelCase__ : List[str] = self.parse_dict(yaml.safe_load(Path(lowerCamelCase__ ).read_text() ) , allow_extra_keys=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
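# A hedged usage sketch against the upstream transformers API (this listing's
# class is named `lowerCAmelCase` and its methods `A_`, so the real upstream
# names HfArgumentParser / parse_args_into_dataclasses are used instead):
import dataclasses

from transformers import HfArgumentParser


@dataclasses.dataclass
class _TrainArgs:
    lr: float = 1e-3
    fp16: bool = False


_parser = HfArgumentParser(_TrainArgs)
(_train_args,) = _parser.parse_args_into_dataclasses(args=["--lr", "0.01", "--fp16"])
assert _train_args.lr == 0.01 and _train_args.fp16 is True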
| 295 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths}
A : str = Text(
cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
A : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : List[str] = None
A : Dict = None
A : Tuple = None
A : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, )
A : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
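# For context, the reader above backs the plain-text loader in datasets; a hedged
# usage sketch via the public API (the data file name is illustrative):
from datasets import load_dataset

# ds = load_dataset("text", data_files={"train": "notes.txt"}, split="train")
# -> each line of notes.txt becomes one example with a single "text" column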
| 662 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCAmelCase : Any = ["torch", "transformers", "onnx"]
def __init__( self : Any , *lowercase_ : List[str] , **lowercase_ : Any ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : Optional[Any] , *lowercase_ : int , **lowercase_ : Tuple ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : List[Any] , *lowercase_ : List[Any] , **lowercase_ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class _UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCAmelCase : Union[str, Any] = ["torch", "transformers", "onnx"]
def __init__( self : str , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : int , *lowercase_ : Optional[int] , **lowercase_ : List[str] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : Dict , *lowercase_ : Optional[int] , **lowercase_ : int ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class _UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCAmelCase : Dict = ["torch", "transformers", "onnx"]
def __init__( self : int , *lowercase_ : Tuple , **lowercase_ : List[str] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : str , *lowercase_ : Optional[int] , **lowercase_ : Optional[int] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class _UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCAmelCase : Union[str, Any] = ["torch", "transformers", "onnx"]
def __init__( self : Tuple , *lowercase_ : List[str] , **lowercase_ : Optional[int] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : Dict ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : List[Any] , *lowercase_ : List[Any] , **lowercase_ : List[Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class _UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCAmelCase : str = ["torch", "transformers", "onnx"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : Dict ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : List[str] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : Dict ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class _UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCAmelCase : Dict = ["torch", "transformers", "onnx"]
def __init__( self : List[str] , *lowercase_ : Any , **lowercase_ : Dict ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : List[str] , *lowercase_ : Optional[int] , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def _snake_case ( cls : Dict , *lowercase_ : Any , **lowercase_ : Optional[Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
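# All of the placeholders above follow one pattern: any construction or classmethod
# call routes through requires_backends, which raises when the listed extras are
# missing. A hedged, minimal re-implementation of the idea (not the library's code):
class _DummySketch:
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        raise ImportError(
            f"{type(self).__name__} requires the backends {', '.join(self._backends)}."
        )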
| 123 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
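# The _LazyModule indirection above defers the real import until an attribute is
# first touched; a hedged, simplified sketch of the mechanism (not the library code):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module(
            "." + self._symbol_to_submodule[attr], self.__name__
        )
        return getattr(submodule, attr)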
| 662 | 0 |
import math
def _UpperCamelCase ( initial_intensity, angle ) -> float:
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(_lowerCAmelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
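# A quick worked check of the cos-squared law above (hedged; the obfuscated
# function name is kept): at 60 degrees, cos^2 = 0.25, so 100 -> 25.
import math

assert math.isclose(_UpperCamelCase(100.0, 60), 25.0)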
| 382 |
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
    """simple docstring"""
    prev_numerator , prev_denominator = 1, 1
    result = []
    for i in range(1 , _lowerCAmelCase + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )


if __name__ == "__main__":
    print(F"""{__UpperCamelCase() = }""")
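# Hedged sanity checks for the routine above: 1393/985 (the 8th expansion) is the
# first sqrt(2) convergent whose numerator outgrows its denominator, and the
# published Project Euler 57 result for 1000 expansions is 153.
assert __UpperCamelCase(8) == 1
assert __UpperCamelCase(1000) == 153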
| 662 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def _UpperCAmelCase ( a : List[Any] ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
snake_case__ = k.replace(_lowerCAmelCase , _lowerCAmelCase )
if k.startswith("""encoder""" ):
snake_case__ = k.replace(""".attn""" , """.self_attn""" )
snake_case__ = k.replace("""norm1""" , """self_attn_layer_norm""" )
snake_case__ = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
snake_case__ = k.replace("""norm1""" , """self_attn_layer_norm""" )
snake_case__ = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
snake_case__ = k.replace("""norm3""" , """final_layer_norm""" )
return k
def _UpperCAmelCase ( a : Any ):
snake_case__ = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
snake_case__ = sd.pop(_lowerCAmelCase )
snake_case__ = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
snake_case__ = v
a__ = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 654 |
import re
def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
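# Illustrative check (hedged addition): each base maps to its complement.
assert dna("GTAT") == "CATA"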
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
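# Example invocation (hedged addition; all paths are illustrative):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin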
| 59 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : dict = {}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self._trie
for char in text:
if char not in trie:
A : str = {}
A : str = trie[char]
A : Optional[int] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._trie
for char in prefix:
if char in trie:
A : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for c, v in d.items():
A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
result.extend(lowerCamelCase__ )
return tuple(lowerCamelCase__ )
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
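# Expected behaviour (hedged addition): each completion carries a trailing space
# emitted by `_elements` for the END sentinel, so on CPython's insertion-ordered
# dicts `autocomplete_using_trie("de")` yields
# ('depart ', 'detergent ', 'deer ', 'deal ').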
| 662 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 379 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def load_and_quantize_model(model, bnb_quantization_config, weights_location=None, device_map=None, no_split_module_classes=None, max_memory=None, offload_folder=None, offload_state_dict=False):
    """Quantize a model with bitsandbytes and optionally dispatch/offload it."""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs,
            )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every `torch.nn.Linear` module with a bitsandbytes quantized layer."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    """Find the modules (e.g. a tied lm_head) that should stay in full precision."""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layer."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if the weight is not yet quantized, quantize it and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
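# Minimal usage sketch (hedged addition): these helpers back the public
# `accelerate` quantization API; `MyModel` below is a hypothetical `nn.Module`.
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   with init_empty_weights():
#       empty_model = MyModel()
#   model = load_and_quantize_model(
#       empty_model, bnb_quantization_config=bnb_config,
#       weights_location="path/to/weights", device_map="auto",
#   )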
| 662 | 0 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
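# Minimal sketch (hedged addition): applying the rules to a toy parameter tree;
# the wte embedding rule above maps its leaf to P("mp", None).
_toy_spec = set_partitions({"transformer": {"wte": {"embedding": 0}}})
assert _toy_spec["transformer"]["wte"]["embedding"] == P("mp", None)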
| 232 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    parser = ArgumentParser("""Transformers CLI tool""", usage="""transformers-cli <command> [<args>]""")
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 662 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ : Tuple = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
SCREAMING_SNAKE_CASE_ : str = 25_0004
SCREAMING_SNAKE_CASE_ : Tuple = 25_0020
@require_sentencepiece
@require_tokenizers
class snake_case_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = MBartaaTokenizer
__UpperCamelCase = MBartaaTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = MBartaaTokenizer(lowerCamelCase__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = """<s>"""
__lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(lowerCamelCase__ ) , 1_054 )
def UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def UpperCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
__lowercase = MBartaaTokenizer(lowerCamelCase__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowerCamelCase__ )
__lowercase = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCamelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
__lowercase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowercase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def UpperCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
__lowercase = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def UpperCAmelCase ( self : int ) -> Optional[int]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowercase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__lowercase = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(lowerCamelCase__ )
__lowercase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__lowercase = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(lowerCamelCase__ )
__lowercase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
__lowercase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(lowerCamelCase__ )
__lowercase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
__lowercase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(lowerCamelCase__ )
__lowercase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = "facebook/mbart-large-50-one-to-many-mmt"
__UpperCamelCase = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__UpperCamelCase = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__UpperCamelCase = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCAmelCase ( cls : int ) -> Optional[Any]:
'''simple docstring'''
__lowercase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
__lowercase = 1
return cls
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 250_038 )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__lowercase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
self.assertIn(lowerCamelCase__ , self.tokenizer.all_special_ids )
__lowercase = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__lowercase = self.tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase__ )
def UpperCAmelCase ( self : int ) -> Optional[int]:
'''simple docstring'''
__lowercase = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , lowerCamelCase__ )
__lowercase = 10
__lowercase = self.tokenizer(lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250_053, 250_001] )
def UpperCAmelCase ( self : Optional[int] ) -> Any:
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
__lowercase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase__ )
__lowercase = MBartaaTokenizer.from_pretrained(lowerCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase__ )
@require_torch
def UpperCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , return_tensors='pt' )
__lowercase = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
__lowercase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__lowercase = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.tokenizer(self.src_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=3 , return_tensors='pt' )
__lowercase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=10 , return_tensors='pt' )
__lowercase = targets["""input_ids"""]
__lowercase = shift_tokens_right(lowerCamelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
__lowercase = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , {
# en_XX, A, test, EOS
'input_ids': [[250_004, 62, 3_034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250_001,
} , )
| 375 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 | 0 |
def is_sum_subset(arr, required_sum) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
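# Illustrative checks (hedged addition): 14 = 6 + 8 is reachable, 5 is not.
assert is_sum_subset([2, 4, 6, 8], 14)
assert not is_sum_subset([2, 4, 6, 8], 5)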
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
def find_minimum_change(denominations, value) -> list[int]:
    """Greedily build the minimal change for `value` from `denominations`."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
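# Illustrative example (hedged addition): greedy change for 987 with the
# default denominations used below.
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]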
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = []
SCREAMING_SNAKE_CASE_:Dict = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 662 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCAmelCase__ = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True once every generated sequence contains one of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset, using accelerate across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation."
        )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f'''check({human_eval["test"][task]["entry_point"]})'''
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f'''Results: {pass_at_k}''')
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 321 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
    merges_file = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
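# Hedged follow-up: once uploaded, the tiny checkpoint can be loaded back like
# any hub model, e.g.
#   from transformers import FSMTForConditionalGeneration
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")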
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""CLIPFeatureExtractor"""]
a_ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 175 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(data_dir), bpe="sentencepiece", sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"), src_dict=str(data_dir / "dict.txt"), )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)
    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
model.eval()
# Now let's copy all the weights.
# Embeddings
A : Any = xmod_sent_encoder.embed_tokens.weight
A : int = xmod_sent_encoder.embed_positions.weight
A : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A : Dict = xmod_sent_encoder.layernorm_embedding.weight
A : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A : str = model.roberta.encoder.layer[i]
A : Tuple = xmod_sent_encoder.layers[i]
# self attention
A : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A : List[str] = xmod_layer.self_attn.q_proj.weight
A : Optional[int] = xmod_layer.self_attn.q_proj.bias
A : List[Any] = xmod_layer.self_attn.k_proj.weight
A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
A : Optional[int] = xmod_layer.self_attn.v_proj.weight
A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
A : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A : Optional[Any] = xmod_layer.self_attn.out_proj.weight
A : Dict = xmod_layer.self_attn.out_proj.bias
A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
A : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
A : str = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
# output
A : Dict = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
A : List[str] = xmod_layer.final_layer_norm.weight
A : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A : str = xmod_layer.adapter_layer_norm.weight
A : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A : Dict = xmod_sent_encoder.layer_norm.weight
A : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
A : int = xmod.model.classification_heads["""mnli"""].dense.weight
A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A : Any = xmod.model.encoder.lm_head.dense.weight
A : Tuple = xmod.model.encoder.lm_head.dense.bias
A : Any = xmod.model.encoder.lm_head.layer_norm.weight
A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
A : Union[str, Any] = xmod.model.encoder.lm_head.weight
A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
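# A minimal, self-contained sketch of the output-equivalence check performed
# above, assuming two same-shaped output tensors; the names `ours`/`theirs`
# are illustrative and not part of the original script:
import torch


def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    # Largest elementwise deviation; an exact port typically lands around 1e-7.
    max_abs_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_abs_diff}")
    return torch.allclose(ours, theirs, atol=atol)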
| 662 | 0 |
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
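# Illustrative sketch (not the library's actual implementation) of how a
# DummyObject-style metaclass fails fast: any attribute access on the
# placeholder class raises an ImportError naming the missing backends instead
# of a confusing error much later.
class _DummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires {cls._backends}, so {name!r} is unavailable")


class _NeedsTorchAndScipy(metaclass=_DummyMeta):
    _backends = ["torch", "scipy"]


# _NeedsTorchAndScipy.from_pretrained -> ImportError mentioning torch and scipy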
| 295 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 662 | 0 |
"""simple docstring"""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two initializers ignoring their names: blank the names, compare
    # the protos, then restore the original names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    # Recurse into the subgraphs carried by control-flow nodes.
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # Rewire every node input that referenced the removed initializer.
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
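# For reference, the data_type codes matched above are ONNX TensorProto enums:
# 1 = FLOAT and 6 = INT32 (4 bytes per element), 7 = INT64 and 11 = DOUBLE
# (8 bytes per element). A clearer spelling of the same table (sketch):
from onnx import TensorProto

BYTES_PER_ELEMENT = {
    TensorProto.FLOAT: 4,
    TensorProto.INT32: 4,
    TensorProto.INT64: 8,
    TensorProto.DOUBLE: 8,
}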
| 123 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = """bf16""" if fpaa else None
A , A : str = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
| 662 | 0 |
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
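# Hedged usage sketch: one processor call tokenizes the text and preprocesses
# the image in a single pass (the checkpoint id below is illustrative and the
# call needs network access to download it):
#
#     from PIL import Image
#     from transformers import BlipProcessor
#
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
#     # inputs now holds pixel_values, input_ids and attention_mask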
| 382 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix a."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
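# Property check (sketch): for a Hermitian matrix the Rayleigh quotient is
# real and bounded by the extreme eigenvalues, which gives a quick numerical
# test:
#
#     eigs = np.linalg.eigvalsh(a)   # ascending, real for Hermitian a
#     assert eigs[0] <= rayleigh_quotient(a, v).real <= eigs[-1]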
| 662 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
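# Worked example of the rounding above (doctest-style sketch):
#
#     >>> downscale_height_and_width(768, 768)
#     (96, 96)
#     >>> downscale_height_and_width(770, 770)  # 770 % 8**2 != 0, so round up
#     (104, 104)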
class KandinskyV22Pipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation with the Kandinsky 2.2 decoder (unet + movq)."""
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , movq=lowerCamelCase__ , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def __magic_name__ ( self : int , UpperCamelCase__ : int=0):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""")
snake_case__ = torch.device(F'''cuda:{gpu_id}''')
snake_case__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase__ , lowerCamelCase__)
def __magic_name__ ( self : List[str] , UpperCamelCase__ : List[Any]=0):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0"""):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")
snake_case__ = torch.device(F'''cuda:{gpu_id}''')
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
snake_case__ = cpu_offload_with_hook(lowerCamelCase__ , lowerCamelCase__ , prev_module_hook=lowerCamelCase__)
# We'll offload the last model manually.
snake_case__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __magic_name__ ( self : Dict):
'''simple docstring'''
if not hasattr(self.unet , """_hf_hook"""):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase__ , """_hf_hook""")
and hasattr(module._hf_hook , """execution_device""")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] = 5_1_2 , UpperCamelCase__ : str = 5_1_2 , UpperCamelCase__ : int = 1_0_0 , UpperCamelCase__ : Tuple = 4.0 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : Optional[Any] = None , UpperCamelCase__ : List[Any] = None , UpperCamelCase__ : List[Any] = "pil" , UpperCamelCase__ : Union[str, Any] = True , ):
'''simple docstring'''
snake_case__ = self._execution_device
snake_case__ = guidance_scale > 1.0
if isinstance(lowerCamelCase__ , lowerCamelCase__):
snake_case__ = torch.cat(lowerCamelCase__ , dim=0)
snake_case__ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowerCamelCase__ , lowerCamelCase__):
snake_case__ = torch.cat(lowerCamelCase__ , dim=0)
if do_classifier_free_guidance:
snake_case__ = image_embeds.repeat_interleave(lowerCamelCase__ , dim=0)
snake_case__ = negative_image_embeds.repeat_interleave(lowerCamelCase__ , dim=0)
snake_case__ = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowerCamelCase__)
self.scheduler.set_timesteps(lowerCamelCase__ , device=lowerCamelCase__)
snake_case__ = self.scheduler.timesteps
snake_case__ = self.unet.config.in_channels
snake_case__ = downscale_height_and_width(lowerCamelCase__ , lowerCamelCase__ , self.movq_scale_factor)
# create initial latent
snake_case__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase__)):
# expand the latents if we are doing classifier free guidance
snake_case__ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
snake_case__ = {"""image_embeds""": image_embeds}
snake_case__ = self.unet(
sample=lowerCamelCase__ , timestep=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , added_cond_kwargs=lowerCamelCase__ , return_dict=lowerCamelCase__ , )[0]
if do_classifier_free_guidance:
snake_case__ = noise_pred.split(latents.shape[1] , dim=1)
snake_case__ = noise_pred.chunk(2)
snake_case__ = variance_pred.chunk(2)
snake_case__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case__ = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , """variance_type""")
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case__ = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
snake_case__ = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ , )[0]
# post-processing
snake_case__ = self.movq.decode(lowerCamelCase__ , force_not_quantize=lowerCamelCase__)["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
if output_type in ["np", "pil"]:
snake_case__ = image * 0.5 + 0.5
snake_case__ = image.clamp(0 , 1)
snake_case__ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
snake_case__ = self.numpy_to_pil(lowerCamelCase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__)
| 654 |
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into a unit-diagonal
    lower-triangular matrix and an upper-triangular matrix."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
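# Worked example: for [[4, 3], [6, 3]] the Doolittle factors are
#     L = [[1, 0], [1.5, 1]] and U = [[4, 3], [0, -1.5]],
# and L @ U reproduces the input matrix.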
| 662 | 0 |
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row of the given length can be tiled with
    black unit squares and coloured tiles of lengths two, three and four.
    """
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
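# Sanity check: a row of length five admits exactly fifteen tilings when unit
# squares are mixed with tiles of lengths two, three and four:
#
#     >>> solution(5)
#     15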
| 59 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
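# Worked example of the inner helper above: with multiple=32 a raw value of
# 381.4 rounds to round(381.4 / 32) * 32 = 12 * 32 = 384; if 384 exceeded
# max_val it would be floored to 11 * 32 = 352, and anything under min_val is
# ceiled back up to the next multiple.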
class DPTImageProcessor(BaseImageProcessor):
    r"""Constructs a DPT image processor."""

    model_input_names = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : int = size if size is not None else {"""height""": 384, """width""": 384}
A : str = get_size_dict(lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Optional[int] = size
A : Union[str, Any] = keep_aspect_ratio
A : int = ensure_multiple_of
A : Dict = resample
A : Optional[Any] = do_rescale
A : Any = rescale_factor
A : str = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Dict = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
A : Optional[Any] = get_resize_output_image_size(
lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, )
return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A : str = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__ )
A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A : Tuple = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A : int = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
A : int = target_sizes.numpy()
A : Union[str, Any] = []
for idx in range(len(lowerCamelCase__ ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
A : List[str] = logits.argmax(dim=1 )
A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 662 | 0 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio generation by diffusion on mel spectrogram images."""

    _optional_components = ["vqvae"]
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , mel=lowerCamelCase__ , vqvae=lowerCamelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : str , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : str = None , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Union[str, Any] = 0 , lowerCamelCase_ : List[str] = 0 , lowerCamelCase_ : str = None , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : Dict = 0 , lowerCamelCase_ : Union[str, Any] = 0 , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : str = 0 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = None , lowerCamelCase_ : str=True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCamelCase__ )
SCREAMING_SNAKE_CASE : str = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE : List[str] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowerCamelCase__ , device=self.device , )
SCREAMING_SNAKE_CASE : Tuple = noise
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCamelCase__ , lowerCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.mel.audio_slice_to_image(lowerCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE : Any = (input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE : Tuple = self.vqvae.encode(torch.unsqueeze(lowerCamelCase__ , 0 ) ).latent_dist.sample(
generator=lowerCamelCase__ )[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE : str = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE : List[str] = int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE : Tuple = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet, UNet2DConditionModel):
SCREAMING_SNAKE_CASE : int = self.unet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )["""sample"""]
else:
SCREAMING_SNAKE_CASE : List[Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ )["""sample"""]
            if isinstance(self.scheduler, DDIMScheduler):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=lowerCamelCase__ , timestep=lowerCamelCase__ , sample=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , )["""prev_sample"""]
else:
SCREAMING_SNAKE_CASE : Any = self.scheduler.step(
model_output=lowerCamelCase__ , timestep=lowerCamelCase__ , sample=lowerCamelCase__ , generator=lowerCamelCase__ , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE : List[str] = mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE : Tuple = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE : Any = 1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE : List[str] = self.vqvae.decode(lowerCamelCase__ )["""sample"""]
SCREAMING_SNAKE_CASE : Dict = (images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE : int = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = (images * 2_55).round().astype("""uint8""" )
SCREAMING_SNAKE_CASE : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images) )
SCREAMING_SNAKE_CASE : Dict = [self.mel.image_to_audio(lowerCamelCase__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCamelCase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowerCamelCase__ ) )
@torch.no_grad()
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Tuple = 50 ):
'''simple docstring'''
        assert isinstance(self.scheduler, DDIMScheduler)
self.scheduler.set_timesteps(lowerCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE : Optional[Any] = (sample / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE : int = torch.Tensor(lowerCamelCase__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE : List[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE : str = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE : int = self.unet(lowerCamelCase__ , lowerCamelCase__ )["""sample"""]
SCREAMING_SNAKE_CASE : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE : int = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def lowerCamelCase_ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = acos(torch.dot(torch.flatten(lowerCamelCase__ ) , torch.flatten(lowerCamelCase__ ) ) / torch.norm(lowerCamelCase__ ) / torch.norm(lowerCamelCase__ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowerCamelCase__ ) + sin(alpha * theta ) * xa / sin(lowerCamelCase__ )
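# The static method above is spherical linear interpolation (slerp):
#     slerp(alpha, x0, x1) = sin((1 - alpha) * theta) / sin(theta) * x0
#                          + sin(alpha * theta) / sin(theta) * x1
# where theta is the angle between the flattened tensors, so alpha = 0 returns
# x0 exactly, alpha = 1 returns x1, and intermediate values move along the
# great circle instead of the straight chord a plain lerp would take.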
| 379 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
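# Example: for the input string "1,-2,3,4" the tables evolve as
#     sum_value = [1, -1, 3, 7]   # best sum of a sub-array ending at index i
#     rear      = [1,  1, 3, 7]   # best sum seen anywhere up to index i
# so the reported maximum sub-array sum is 7. This is Kadane's algorithm with
# an explicit running-best table.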
| 662 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
"""simple docstring"""
def __init__( self : Dict , _snake_case : Union[str, Any] , _snake_case : List[str]=13 , _snake_case : List[str]=7 , _snake_case : List[Any]=True , _snake_case : List[str]=True , _snake_case : Optional[int]=99 , _snake_case : Any=32 , _snake_case : int=5 , _snake_case : List[str]=4 , _snake_case : Optional[Any]=37 , _snake_case : List[str]="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : int=0.1 , _snake_case : Optional[Any]=50 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=True , _snake_case : Any=None , ) -> Optional[int]:
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_mask
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = initializer_range
a__ = use_labels
a__ = scope
def _lowerCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
(
a__
) = self.prepare_config_and_inputs()
a__ = True
a__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self : Union[str, Any] , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : int , **_snake_case : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
a__ = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
a__ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : str , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : Union[str, Any] , **_snake_case : Union[str, Any] , ) -> Any:
'''simple docstring'''
a__ = True
a__ = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , )
a__ = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[int] , _snake_case : Tuple , _snake_case : int , _snake_case : int , _snake_case : Tuple , _snake_case : int , _snake_case : Any , **_snake_case : Optional[Any] , ) -> Dict:
'''simple docstring'''
a__ = True
a__ = True
a__ = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
a__ = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ , )
a__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
a__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a__ = torch.cat([input_ids, next_tokens] , dim=-1 )
a__ = torch.cat([input_mask, next_mask] , dim=-1 )
a__ = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
a__ = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
a__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a__ = output_from_no_past[:, -3:, random_slice_idx].detach()
a__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
def _lowerCAmelCase ( self : Tuple , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : List[str] , *_snake_case : List[Any] , ) -> str:
'''simple docstring'''
a__ = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
a__ = self.prepare_config_and_inputs()
a__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
a_ : Any =(BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a_ : int =(BertGenerationDecoder,) if is_torch_available() else ()
a_ : List[Any] =(
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
a__ = BertGenerationEncoderTester(self )
a__ = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_bert(self):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
    def test_model_as_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_decoder_model_past_with_large_inputs(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        '''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, )
    def test_for_causal_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding(self):
        '''simple docstring'''
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding(self):
        '''simple docstring'''
        model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 5_0358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 232 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]
    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
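# Usage sketch (my addition, not part of the original file): a BiT backbone that
# exposes its last two stages, e.g. to feed a detection neck; the stage names
# follow the stage_names list built in __init__ above.
# config = BitConfig(layer_type="bottleneck", out_features=["stage3", "stage4"])
# config.out_features  -> ["stage3", "stage4"]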
| 662 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge input_list[low:mid] and input_list[mid:high+1] (both assumed sorted) in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
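# Quick self-check (my addition, not in the original script): the bottom-up loop
# doubles the run length p each pass -- runs of 2, then 4, then one final merge.
if __name__ == "__main__":
    assert iter_merge_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
    assert iter_merge_sort([]) == []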
| 375 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
'''simple docstring'''
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99,
        hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50,
        initializer_range=0.02, use_labels=True, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range, )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.add_cross_attention = True
        config.is_decoder = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
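    # What the method above verifies (my gloss): a single forward pass over the
    # extended sequence must agree with an incremental pass that reuses
    # past_key_values, to within atol=1e-3, on a random hidden-state slice of
    # the last three positions.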
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, )
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 5_0358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 662 | 0 |
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
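# Cross-check sketch (my addition, not from the source): the problem asks for the
# blue-disc count b with P(two blue) = b*(b-1) / (n*(n-1)) = 1/2 for a total
# n >= min_total; the loop above walks the corresponding Pell-equation solutions.
# Brute force over small boxes reproduces the first arrangements, e.g. 15 of 21.
def _brute_force_first_arrangement(min_total):
    n = max(min_total, 2)
    while True:
        for b in range(1, n + 1):
            if 2 * b * (b - 1) == n * (n - 1):
                return b, n
        n += 1
# _brute_force_first_arrangement(5) == (15, 21)   # (15/21) * (14/20) == 1/2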
| 671 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs )
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 662 | 0 |
import json
import sys
def format_json_to_md(input_json_file: str, output_md_file: str) -> None:
    '''simple docstring'''
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
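# Worked example (my sketch, not from the source): given an input JSON like
#   {"benchmarks/lm.json": {"time": {"new": 1.5, "old": 2.0, "diff": -0.5}}}
# the script renders one markdown table per benchmark inside a <details> block:
#   ### Benchmark: lm.json
#   | metric | time |
#   |--------|---|
#   | new / old (diff) | 1.500000 / 2.000000 (-0.500000) |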
| 321 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """simple docstring"""
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
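# Shape gloss (my addition): the fused qkv projection weight is (3*hidden, hidden);
# rows [0:h] hold Q, [h:2h] hold K and the last h rows hold V, which is what the
# three slices above pick apart. Note there is no k_bias to pop: BEiT-style
# attention keeps the key projection bias-free by construction.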
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 662 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
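# Pattern note (my addition): _LazyModule replaces this module object in
# sys.modules, so importing the package stays cheap and the torch-backed
# MCTCT classes are only materialized on first attribute access.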
| 175 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    '''simple docstring'''
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
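# Usage note (my addition): the shim keeps old imports working while steering
# users to the new name -- instantiating CLIPFeatureExtractor emits the
# FutureWarning and then behaves exactly like CLIPImageProcessor.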
| 662 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    stem = fname.split(os.path.sep)[-1]
    return re.search(r'^(.*)_\d+\.jpg$', stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__(self):
        return len(self.file_names)
    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert('RGB')
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
if args.with_tracking:
lowerCamelCase__ : Optional[int] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowerCamelCase__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple) ):
        image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
lowerCamelCase__ : Any = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowerCamelCase__ : Optional[Any] = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
lowerCamelCase__ : Dict = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowerCamelCase__ : Optional[int] = os.path.split(_lowerCAmelCase )[-1].split('.' )[0]
accelerator.init_trackers(_lowerCAmelCase , _lowerCAmelCase )
# Grab all the image filenames
lowerCamelCase__ : Union[str, Any] = [os.path.join(args.data_dir , _lowerCAmelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
lowerCamelCase__ : Tuple = [extract_label(_lowerCAmelCase ) for fname in file_names]
lowerCamelCase__ : Union[str, Any] = list(set(_lowerCAmelCase ) )
id_to_label.sort()
lowerCamelCase__ : Dict = {lbl: i for i, lbl in enumerate(_lowerCAmelCase )}
# Set the seed before splitting the data.
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# Split our filenames between train and validation
lowerCamelCase__ : int = np.random.permutation(len(_lowerCAmelCase ) )
lowerCamelCase__ : Optional[Any] = int(0.8 * len(_lowerCAmelCase ) )
lowerCamelCase__ : Optional[Any] = random_perm[:cut]
lowerCamelCase__ : Union[str, Any] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowerCamelCase__ : Tuple = Compose([RandomResizedCrop(_lowerCAmelCase , scale=(0.5, 1.0) ), ToTensor()] )
lowerCamelCase__ : str = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_lowerCAmelCase , label_to_id=_lowerCAmelCase )
# For evaluation, we use a deterministic Resize
lowerCamelCase__ : Optional[int] = Compose([Resize(_lowerCAmelCase ), ToTensor()] )
lowerCamelCase__ : List[Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=_lowerCAmelCase , label_to_id=_lowerCAmelCase )
# Instantiate dataloaders.
lowerCamelCase__ : int = DataLoader(_lowerCAmelCase , shuffle=_lowerCAmelCase , batch_size=_lowerCAmelCase , num_workers=4 )
lowerCamelCase__ : Optional[int] = DataLoader(_lowerCAmelCase , shuffle=_lowerCAmelCase , batch_size=_lowerCAmelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase__ : Any = create_model('resnet50d' , pretrained=_lowerCAmelCase , num_classes=len(_lowerCAmelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase__ : Tuple = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowerCamelCase__ : List[str] = False
for param in model.get_classifier().parameters():
lowerCamelCase__ : str = True
# We normalize the batches of images to be a bit faster.
lowerCamelCase__ : Optional[int] = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
lowerCamelCase__ : List[Any] = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowerCamelCase__ : int = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowerCamelCase__ : str = OneCycleLR(optimizer=_lowerCAmelCase , max_lr=_lowerCAmelCase , epochs=_lowerCAmelCase , steps_per_epoch=len(_lowerCAmelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase__ : str = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase__ : Any = 0
# We also need to keep track of the starting epoch so files are named properly
lowerCamelCase__ : List[Any] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase__ : Tuple = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowerCamelCase__ : int = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowerCamelCase__ : int = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowerCamelCase__ : Dict = os.path.splitext(_lowerCAmelCase )[0]
if "epoch" in training_difference:
lowerCamelCase__ : Optional[int] = int(training_difference.replace('epoch_' , '' ) ) + 1
lowerCamelCase__ : str = None
else:
lowerCamelCase__ : int = int(training_difference.replace('step_' , '' ) )
lowerCamelCase__ : Dict = resume_step // len(_lowerCAmelCase )
resume_step -= starting_epoch * len(_lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
model.train()
if args.with_tracking:
lowerCamelCase__ : str = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowerCamelCase__ : Union[str, Any] = accelerator.skip_first_batches(_lowerCAmelCase , _lowerCAmelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowerCamelCase__ : Union[str, Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCamelCase__ : Optional[int] = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCamelCase__ : Union[str, Any] = (batch["""image"""] - mean) / std
lowerCamelCase__ : Optional[Any] = model(_lowerCAmelCase )
lowerCamelCase__ : Union[str, Any] = torch.nn.functional.cross_entropy(_lowerCAmelCase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ : Tuple = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowerCamelCase__ : int = os.path.join(args.output_dir , _lowerCAmelCase )
accelerator.save_state(_lowerCAmelCase )
model.eval()
lowerCamelCase__ : str = 0
lowerCamelCase__ : Any = 0
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCamelCase__ : Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCamelCase__ : Optional[int] = (batch["""image"""] - mean) / std
with torch.no_grad():
lowerCamelCase__ : List[str] = model(_lowerCAmelCase )
lowerCamelCase__ : Dict = outputs.argmax(dim=-1 )
lowerCamelCase__ : str = accelerator.gather_for_metrics((predictions, batch['label']) )
lowerCamelCase__ : List[str] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowerCamelCase__ : Optional[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(_lowerCAmelCase ),
'epoch': epoch,
} , step=_lowerCAmelCase , )
if checkpointing_steps == "epoch":
lowerCamelCase__ : List[Any] = F"""epoch_{epoch}"""
if args.output_dir is not None:
lowerCamelCase__ : Optional[Any] = os.path.join(args.output_dir , _lowerCAmelCase )
accelerator.save_state(_lowerCAmelCase )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=_lowerCAmelCase , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=_lowerCAmelCase , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
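# Launch sketch (my addition; the script file name is assumed, the flags come
# from the parser above). With images laid out as <data_dir>/<Class>_<n>.jpg:
#   accelerate launch cv_example.py --data_dir images/ --mixed_precision fp16 \
#       --checkpointing_steps epoch --with_tracking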
| 295 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    '''simple docstring'''
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
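# Usage sketch (illustrative path, my addition): the Text builder yields one
# example per line of the file, under a single "text" column.
# ds = TextDatasetReader("corpus.txt", split=NamedSplit("train")).read()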
| 662 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)
def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(['A', 'B', 'C'])
    generate_all_subsequences(seq)
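# Trace sketch (my addition): each element is excluded before it is included, so
# all 2**n subsequences are printed; for [3, 1, 2, 4] the first lines are
#   []
#   [4]
#   [2]
#   [2, 4]
# (the exclude branch recurses before the include branch, hence [] prints first).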
| 123 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 662 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        image_processor_map = {
"""do_resize""": True,
"""size""": {"""height""": 2_24, """width""": 2_24},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"""image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
"""do_convert_rgb""": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : Any = self.get_rust_tokenizer()
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCAmelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase__ )
__UpperCAmelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCAmelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase__ )
    def test_save_load_pretrained_additional_features(self):
__UpperCAmelCase : Dict = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Dict = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
__UpperCAmelCase : List[str] = self.get_image_processor(do_normalize=lowerCamelCase__ )
__UpperCAmelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=lowerCamelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
    def test_image_processor(self):
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : int = ChineseCLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__UpperCAmelCase : str = self.prepare_image_inputs()
__UpperCAmelCase : List[str] = image_processor(lowerCamelCase__ , return_tensors="np" )
__UpperCAmelCase : Optional[Any] = processor(images=lowerCamelCase__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
__UpperCAmelCase : Dict = self.get_image_processor()
__UpperCAmelCase : Tuple = self.get_tokenizer()
__UpperCAmelCase : str = ChineseCLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__UpperCAmelCase : Any = """Alexandra,T-shirt的价格是15便士。"""
__UpperCAmelCase : Any = processor(text=lowerCamelCase__ )
__UpperCAmelCase : Optional[int] = tokenizer(lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self):
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__UpperCAmelCase : Optional[int] = """Alexandra,T-shirt的价格是15便士。"""
__UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
__UpperCAmelCase : List[str] = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
    def test_tokenizer_decode(self):
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : Dict = ChineseCLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : int = processor.batch_decode(lowerCamelCase__ )
__UpperCAmelCase : Any = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
    def test_model_input_names(self):
__UpperCAmelCase : int = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : int = ChineseCLIPProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__UpperCAmelCase : Optional[Any] = """Alexandra,T-shirt的价格是15便士。"""
__UpperCAmelCase : Dict = self.prepare_image_inputs()
__UpperCAmelCase : Optional[Any] = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 382 |
def solution(n: int = 1000) -> int:
    """simple docstring"""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """simple docstring"""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """simple docstring"""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
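# Usage sketch for the class above (the checkpoint name is illustrative, not defined in this file):
# feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# This resolves `feature_extractor_type` from the checkpoint's preprocessor_config.json and
# instantiates the matching class (here, Wav2Vec2FeatureExtractor per the mapping above).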
| 654 |
import re
def dna(dna: str) -> str:
    """
    Returns the complementary strand of a DNA sequence.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 662 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True,
        keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>",
        pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"], **kwargs,
    ):
        """simple docstring"""
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
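# Worked example of the special-token layout above (token ids are made up):
# with token_ids_0 = [10, 11] and token_ids_1 = [20, 21], build_inputs_with_special_tokens
# returns [10, 11, <sep>, 20, 21, <sep>, <cls>]; unlike BERT, XLNet puts <cls> at the end,
# and create_token_type_ids_from_sequences gives that final <cls> token segment id 2.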
| 59 |
from __future__ import annotations
END = "#"


class Trie:
    """simple docstring"""

    def __init__(self):
        self._trie: dict = {}

    def insert_word(self, text):
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d):
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string) -> tuple:
    """simple docstring"""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    """simple docstring"""
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
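# Expected output of the example above (worked out by hand from the inserted words):
# autocomplete_using_trie("de") -> ('depart ', 'detergent ', 'deer ', 'deal ')
# Each match carries a trailing space because _elements maps the END marker to " ".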
| 662 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """simple docstring"""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
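# Worked example (verified by hand against the rules above): one step turns the vertical
# blinker into a horizontal one, i.e. new_generation(BLINKER) returns
# [[0, 0, 0], [1, 1, 1], [0, 0, 0]].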
| 379 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model, bnb_quantization_config, weights_location=None, device_map=None,
    no_split_module_classes=None, max_memory=None, offload_folder=None, offload_state_dict=False,
):
    """simple docstring"""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map,
            max_memory=max_memory, no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder, offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
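# Minimal usage sketch of the empty-weights flow above (the path and the model class are
# placeholders, not part of this module):
# bnb_config = BnbQuantizationConfig(load_in_8bit=True)
# with init_empty_weights():
#     empty_model = MyModel()  # hypothetical architecture
# model = load_and_quantize_model(
#     empty_model, bnb_config, weights_location="/path/to/weights", device_map="auto"
# )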
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    """simple docstring"""
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """simple docstring"""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """simple docstring"""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None,
                        has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    """simple docstring"""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """simple docstring"""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    """simple docstring"""
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """simple docstring"""
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"),
                offload_folder, index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 662 | 0 |
"""simple docstring"""
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """simple docstring"""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
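# Example (assuming nltk's punkt model splits these two sentences):
# add_newline_to_end_of_each_sentence("Hello there. General Kenobi.<n>")
# -> "Hello there.\nGeneral Kenobi."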
| 232 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """simple docstring"""
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
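# Usage sketch: running `transformers-cli env` parses "env" through the subparsers above,
# so args.func builds an EnvironmentCommand instance and main() calls its run() method.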
| 662 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    """simple docstring"""

    def __init__(
        self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size,
            num_channels=self.num_channels, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8_192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21_841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2_396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 375 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blenderbot"] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blenderbot"] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_blenderbot"] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 662 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 671 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Find the minimum change from the given denominations and value."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    # (assumes `denominations` is sorted in ascending order)
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the "answers" array

    return answer
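

# Added illustrative check (not part of the original script): the greedy strategy
# is optimal for canonical coin systems such as the Indian denominations used
# below, but can be suboptimal for arbitrary ones (e.g. [1, 3, 4] with value 6).
assert find_minimum_change([1, 2, 5, 10], "18") == [10, 5, 2, 1]
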
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 662 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
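

# Shape note (added): ModelForTest maps a (batch, 3) input to (batch, 5) via
# Linear(3, 4) -> BatchNorm1d(4) -> Linear(4, 5); the tests below key off its
# "linear1.*" and "linear2.*" state-dict entries.
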
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 321 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build

# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
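
# A minimal sanity check of the uploaded artifact afterwards (added illustration;
# "stas/tiny-wmt19-en-ru" is the hub name mentioned at the top of this script):
#
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
#   model(**tokenizer(["Making tiny model"], return_tensors="pt"))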
| 662 | 0 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 175 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy/paste/tweak the fairseq X-MOD weights into the Transformers design."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
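

# Example invocation (added illustration; the script file name and paths are
# placeholders, not verified values):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/model.pt \
#       --pytorch_dump_folder_path ./xmod-base [--classification_head]
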
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 662 | 0 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 295 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 662 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : str , lowercase_ : str ):
snake_case_ : Any = value
snake_case_ : Node | None = None
snake_case_ : Node | None = None
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , lowercase_ : Tuple ):
snake_case_ : int = tree
def _snake_case ( self : Tuple , lowercase_ : Optional[Any] ):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Any ):
yield self.depth_first_search(self.tree )
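

# Added illustrative example (not part of the original module): for the tree
#       10
#      /  \
#     5   -3
# the node sum is 10 + 5 + (-3) = 12.
def _tree_sum_example() -> int:
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    return next(iter(BinaryTreeNodeSum(root)))  # -> 12
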
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 662 | 0 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        # For common tests
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 382 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
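

# For a Hermitian matrix M and a nonzero vector v, the Rayleigh quotient
# R(M, v) = (v* M v) / (v* v) is always real, and it is bounded by the extreme
# eigenvalues of M: lambda_min <= R(M, v) <= lambda_max.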


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 662 | 0 |
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate AND of the input values: the output is 1 only if both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
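

# The tuple trick above encodes the whole truth table: (a, b).count(0) == 0
# holds exactly when neither input is 0, so the gate outputs 1 only for (1, 1).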
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 654 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Lower-upper (LU) decomposition of a square matrix (Doolittle's method)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
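

# Added worked example (uses lower_upper_decomposition above): for
# A = [[4, 3], [6, 3]] the Doolittle factors are
#   L = [[1.0, 0.0], [1.5, 1.0]]
#   U = [[4.0, 3.0], [0.0, -1.5]]
# and L @ U reproduces A.
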
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
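

# Illustrative behaviour of replace_key (added examples, traced through the
# branches above):
#   replace_key("y_emb.weight")        -> "metadata_embedding.weight"
#   replace_key("prime_state_ln.bias") -> "encoder.final_layer_norm.bias"
#   replace_key("prior.x_out.weight")  -> "prior.fc_proj_out.weight"
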
def lowerCAmelCase_ ( __a , __a , __a , __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] ={}
import re
lowerCamelCase__: Optional[Any] =re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
lowerCamelCase__: Union[str, Any] =re.compile(
R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowerCamelCase__: List[str] =re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
lowerCamelCase__: List[str] =re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
lowerCamelCase__: int =re.compile(
R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowerCamelCase__: Dict =re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
lowerCamelCase__: Union[str, Any] =re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
lowerCamelCase__: List[str] =re.compile(
R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowerCamelCase__: Dict =re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCAmelCase ):
lowerCamelCase__: Any =re_encoder_block_conv_in.match(_lowerCAmelCase )
lowerCamelCase__: Optional[int] =regex_match.groups()
lowerCamelCase__: List[str] =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase__: List[str] =F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
lowerCamelCase__: List[Any] =re_encoder_block_conv_in.sub(_lowerCAmelCase , _lowerCAmelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCAmelCase ):
lowerCamelCase__: List[str] =re_encoder_block_resnet.match(_lowerCAmelCase )
lowerCamelCase__: Optional[Any] =regex_match.groups()
lowerCamelCase__: Tuple =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase__: List[Any] ={"""1""": 1, """3""": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if key is None or f"{key_prefix}.{key}" not in model_state_dict:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key} -> {key} : \nshape {val.shape} and {value.shape} do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
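# Minimal illustration of the renaming above (hypothetical inputs; the regex
# patterns and replace_key are defined earlier in this file): keys that match
# one of the patterns are rewritten into the HF module path, and anything else
# falls through the final `else` and is kept verbatim after replace_key.
#
#   mapping = {}
#   converted = fix_jukebox_keys(state_dict, model.state_dict(), "vqvae", mapping)
#   # mapping now records new_key -> original_key for every converted entry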
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """
    Copy/paste/tweak the original OpenAI Jukebox weights into the Transformers Jukebox structure.
    """
    # download any checkpoint shards that are not already cached locally
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
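# Example invocation (a sketch; assumes this script is saved as
# convert_jukebox.py and that the checkpoint files are downloadable):
#
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted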
| 59 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    """Compute the (height, width) to resize to, optionally preserving the aspect
    ratio and constraining each side to a multiple of `multiple`."""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x
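    # For instance: constraint_to_multiple_of(250, 32) rounds to the nearest
    # multiple and returns 256, while constraint_to_multiple_of(250, 32, max_val=255)
    # floors instead and returns 224.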
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
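# A quick sketch of the overall sizing logic (illustrative values only):
#
#   image = np.zeros((3, 480, 640), dtype=np.uint8)  # channels-first dummy image
#   get_resize_output_image_size(image, output_size=384, keep_aspect_ratio=True, multiple=32)
#   # expected -> (384, 512): both sides scaled by roughly the same factor,
#   # then snapped to multiples of 32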
class DPTImageProcessor(BaseImageProcessor):
    r"""
    DPT-style image processor: optional aspect-ratio-preserving resize whose output
    sides are constrained to a multiple of `ensure_multiple_of`, followed by
    optional rescaling and normalization.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None,
                 resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False,
                 ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
                 do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None,
                 image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        self.size = get_size_dict(size)
        self.do_resize = do_resize
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False,
               ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float],
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None,
                   keep_aspect_ratio: Optional[bool] = None, ensure_multiple_of: Optional[int] = None,
                   resample: Optional[PILImageResampling] = None, do_rescale: Optional[bool] = None,
                   rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None,
                   image_mean: Optional[Union[float, List[float]]] = None,
                   image_std: Optional[Union[float, List[float]]] = None,
                   return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        # Fall back to the processor-level defaults for any argument left unset.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [
                self.resize(image=image, size=size, keep_aspect_ratio=keep_aspect_ratio,
                            ensure_multiple_of=ensure_multiple_of, resample=resample)
                for image in images
            ]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
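# Sketch of typical usage (names are placeholders; assumes a PIL image and a
# DPT-style segmentation model producing `outputs.logits`):
#
#   processor = DPTImageProcessor(size={"height": 384, "width": 384})
#   inputs = processor(images=image, return_tensors="pt")
#   # ... run the model, then:
#   # maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])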
| 662 | 0 |
"""
Goldbach's other conjecture: every odd composite can be written as the sum of a
prime and twice a square. Find the smallest odd composite for which this fails.
"""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Trial division, using the fact that every prime > 3 has the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All remaining candidates are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
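# e.g. is_prime(13) -> True, is_prime(15) -> False (15 = 3 * 5)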
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be > 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []
def solution() -> int:
    """Return the smallest odd composite violating Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
    print(f"{solution() = }")
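# Since odd_composites is built in increasing order, compute_nums(1) returns the
# smallest counterexample first; the well-known answer to this problem is 5777,
# i.e. solution() == 5777.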
| 379 |
class SubArray:
    """Maximum contiguous sub-array sum, computed with two DP arrays."""

    def __init__(self, arr):
        # the input is a comma-separated string; split it into a list of tokens
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)  # best sum seen up to index i
        sum_value = [int(self.array[0])] * len(self.array)  # best sum ending at index i
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
| 662 | 0 |