# === jax_formatter.py: the "jax" formatting backend of the Hugging Face datasets library ===

# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with `pickle` nor with `dill`
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with `pickle` nor with `dill`
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
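
# A minimal usage sketch for the formatter above, assuming the public
# `datasets` API in which this class backs `Dataset.with_format("jax")`.
# The column name and values are illustrative only; shown as comments since
# this module is a library file with relative imports:
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#   ds[0]["x"]   # Array([1, 2], dtype=int32)
#   ds[:2]["x"]  # Array([[1, 2], [3, 4]], dtype=int32)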
# === similarity_search.py: nearest-vector search by Euclidean distance ===

from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each query vector, return the nearest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
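
    # A small usage sketch (illustrative values): the nearest dataset vector
    # to the query [0, 1] is [0, 0] at Euclidean distance 1.0, and two
    # parallel vectors have cosine similarity ~1.0.
    dataset = np.array([[0, 0], [1, 1], [2, 2]])
    value_array = np.array([[0, 1]])
    print(similarity_search(dataset, value_array))  # [[[0, 0], 1.0]]
    print(cosine_similarity(np.array([1, 2]), np.array([2, 4])))  # ~1.0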
# === test_processor_clap.py: transformers test suite for ClapProcessor ===

import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
# === test_check_copies.py: diffusers test for the "# Copied from" consistency checker ===

import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
# === convert_original_controlnet_to_diffusers.py: ControlNet checkpoint conversion CLI ===

import argparse

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
# === Dual-number automatic differentiation: Dual class and differentiate() ===

from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Compute the `order`-th derivative of `func` at `position` via dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
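
# Why this works, in brief: writing x = a + E and expanding f(a + E) as a
# Taylor series gives f(a) + f'(a)*E + (f''(a)/2!)*E**2 + ..., so the
# coefficient of E**n in the result is the n-th derivative divided by n!.
# That is why `differentiate` multiplies `result.duals[order - 1]` by
# `factorial(order)`. For the demo above, f(y) = y**6 and
# f''(9) = 30 * 9**4 = 196830.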
# === tokenization_big_bird_fast.py: fast BigBird tokenizer from transformers ===

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding [CLS] and [SEP] around one or two sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type ids: 0 for the first sequence and its special tokens, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
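
# A minimal sketch of the special-token layout the methods above produce
# (illustrative token ids for a hypothetical loaded instance `tok`; running
# this requires downloading a real checkpoint, so it is shown as comments):
#
#   tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   tok.build_inputs_with_special_tokens([5, 6])               # [CLS] 5 6 [SEP]
#   tok.build_inputs_with_special_tokens([5, 6], [7, 8])       # [CLS] 5 6 [SEP] 7 8 [SEP]
#   tok.create_token_type_ids_from_sequences([5, 6], [7, 8])   # [0, 0, 0, 0, 1, 1, 1]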
# === conftest.py: transformers tests directory pytest configuration ===

# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
# === convert_rwkv_checkpoint_to_hf.py: RWKV checkpoint conversion script ===

import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
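
# A quick illustration of the key-renaming rules in `convert_state_dict` above
# (toy zero tensors; the expected keys follow from the regex rewrites; shown
# as comments so the conversion script itself stays unchanged):
#
#   toy = {"emb.weight": torch.zeros(1), "blocks.0.att.time_mix_k": torch.zeros(1)}
#   sorted(convert_state_dict(toy))
#   # ['rwkv.blocks.0.attention.time_mix_key', 'rwkv.embeddings.weight']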
# === test_modeling_flax_roberta_prelayernorm.py: Flax RoBERTa-PreLayerNorm tests ===

import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
# === models/instructblip/__init__.py: lazy import structure for InstructBLIP ===

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# === Articulation points (cut vertices) of an undirected graph via DFS ===


def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point only if it has more than one tree edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
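
# For the adjacency list above the script prints the cut vertices 2, 3 and 5:
# removing 2 separates {0, 1} from the rest of the graph, removing 3 isolates
# vertex 4, and removing 5 disconnects the cycle {6, 7, 8}.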
# === Project Euler problem 13: first ten digits of the sum of the numbers in num.txt ===

import os


def solution():
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
# === test_processor_vision_text_dual_encoder.py: VisionTextDualEncoderProcessor tests ===

import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images built from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase__ = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
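# Dataset that tokenizes each HumanEval prompt once and yields it n_copies times, so multiple samples are generated per task.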
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=1 ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = tokenizer
_UpperCAmelCase : Tuple = dataset
_UpperCAmelCase : Union[str, Any] = len(lowerCamelCase__ ) if n_tasks is None else n_tasks
_UpperCAmelCase : Any = n_copies
def __iter__( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
_UpperCAmelCase : Optional[Any] = self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
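# Stopping criterion: halt generation once every sequence in the batch contains one of the end-of-function strings after the prompt.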
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = start_length
_UpperCAmelCase : Union[str, Any] = eof_strings
_UpperCAmelCase : Union[str, Any] = tokenizer
def __call__( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_UpperCAmelCase : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase__ )
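# Cut a generated completion at the last end-of-function marker: re.split keeps the separators, and dropping the final two list entries removes the marker and everything after it.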
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = re.split("(%s)" % "|".join(__lowerCAmelCase ) , __lowerCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=20 , **__lowerCAmelCase ):
_UpperCAmelCase : Tuple = defaultdict(__lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowerCAmelCase ) ):
with torch.no_grad():
_UpperCAmelCase : Tuple = batch["ids"].shape[-1]
_UpperCAmelCase : Optional[int] = accelerator.unwrap_model(__lowerCAmelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__lowerCAmelCase , **__lowerCAmelCase )
# each task is generated batch_size times
_UpperCAmelCase : str = batch["task_id"].repeat(__lowerCAmelCase )
_UpperCAmelCase : str = accelerator.pad_across_processes(
__lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
_UpperCAmelCase , _UpperCAmelCase : int = accelerator.gather((generated_tokens, generated_tasks) )
_UpperCAmelCase : Dict = generated_tokens.cpu().numpy()
_UpperCAmelCase : Dict = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowerCAmelCase , __lowerCAmelCase ):
gen_token_dict[task].append(__lowerCAmelCase )
_UpperCAmelCase : int = [[] for _ in range(__lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_UpperCAmelCase : List[Any] = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
code_gens[task].append(remove_last_block(__lowerCAmelCase ) )
return code_gens
def __lowerCAmelCase ():
# Setup configuration
_UpperCAmelCase : List[str] = HfArgumentParser(__lowerCAmelCase )
_UpperCAmelCase : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure the tokenizer plays nicely with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
if args.num_workers is None:
_UpperCAmelCase : List[str] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_UpperCAmelCase : List[Any] = Accelerator()
set_seed(args.seed , device_specific=__lowerCAmelCase )
# Load model and tokenizer
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_UpperCAmelCase : Tuple = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowerCAmelCase , __lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
_UpperCAmelCase : Union[str, Any] = load_dataset("openai_humaneval" )
_UpperCAmelCase : List[Any] = load_metric("code_eval" )
_UpperCAmelCase : Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
_UpperCAmelCase : Any = args.n_samples // args.batch_size
_UpperCAmelCase : Tuple = TokenizedDataset(__lowerCAmelCase , human_eval["test"] , n_copies=__lowerCAmelCase , n_tasks=__lowerCAmelCase )
    # note: args.batch_size is the number of sequences generated per prompt (num_return_sequences), not the dataloader batch size
_UpperCAmelCase : List[str] = DataLoader(__lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_UpperCAmelCase : Optional[int] = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
_UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Dict = complete_code(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , n_tasks=__lowerCAmelCase , batch_size=args.batch_size , **__lowerCAmelCase , )
if accelerator.is_main_process:
_UpperCAmelCase : List[Any] = []
for task in tqdm(range(__lowerCAmelCase ) ):
_UpperCAmelCase : str = human_eval["test"][task]["test"]
_UpperCAmelCase : Union[str, Any] = F"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
_UpperCAmelCase , _UpperCAmelCase : str = code_eval_metric.compute(
references=__lowerCAmelCase , predictions=__lowerCAmelCase , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
'''simple docstring'''
lowerCamelCase__ = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
from __future__ import annotations
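# Map each lowercase letter to its 1-based position in the alphabet (a=1, ..., z=26); decode reverses the mapping.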
def __lowerCAmelCase (__lowerCAmelCase ):
    return [ord(elem) - 96 for elem in plain]
def __lowerCAmelCase (__lowerCAmelCase ):
return "".join(chr(elem + 96 ) for elem in encoded )
def __lowerCAmelCase ():
_UpperCAmelCase : int = encode(input("-> " ).strip().lower() )
print("Encoded: " , __lowerCAmelCase )
print("Decoded:" , decode(__lowerCAmelCase ) )
if __name__ == "__main__":
main()
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
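# Resolve the download URL of a file inside a dataset repository on the Hub.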
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def __lowerCAmelCase (__lowerCAmelCase ):
return EnvironmentCommand()
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : ArgumentParser ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[str] = parser.add_parser("env" )
download_parser.set_defaults(func=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = huggingface_hub.__version__
_UpperCAmelCase : Dict = "not installed"
_UpperCAmelCase : Union[str, Any] = "NA"
if is_torch_available():
import torch
_UpperCAmelCase : str = torch.__version__
_UpperCAmelCase : Union[str, Any] = torch.cuda.is_available()
_UpperCAmelCase : str = "not installed"
if is_transformers_available():
import transformers
_UpperCAmelCase : Tuple = transformers.__version__
_UpperCAmelCase : List[Any] = "not installed"
if is_accelerate_available():
import accelerate
_UpperCAmelCase : Any = accelerate.__version__
_UpperCAmelCase : Optional[Any] = "not installed"
if is_xformers_available():
import xformers
_UpperCAmelCase : Optional[Any] = xformers.__version__
_UpperCAmelCase : Any = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(lowerCamelCase__ ) )
return info
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
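# Thin wrapper that exposes a timm model through the transformers backbone API (BackboneMixin).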
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
        _UpperCAmelCase : Optional[int] = {layer["module"]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
'''simple docstring'''
lowerCamelCase__ = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
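# Convert a (possibly negative) integer to a '0x'-prefixed hexadecimal string via repeated divmod by 16.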
def __lowerCAmelCase (__lowerCAmelCase ):
assert type(__lowerCAmelCase ) in (int, float) and decimal == int(__lowerCAmelCase )
_UpperCAmelCase : str = int(__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = ""
_UpperCAmelCase : Union[str, Any] = False
if decimal < 0:
_UpperCAmelCase : Dict = True
decimal *= -1
while decimal > 0:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = divmod(__lowerCAmelCase , 16 )
_UpperCAmelCase : Any = values[remainder] + hexadecimal
_UpperCAmelCase : List[str] = "0x" + hexadecimal
if negative:
_UpperCAmelCase : List[Any] = "-" + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCamelCase__ = '<<<<<<< This should probably be modified because it mentions: '
lowerCamelCase__ = '=======\n>>>>>>>\n'
lowerCamelCase__ = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
lowerCamelCase__ = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def __lowerCAmelCase (__lowerCAmelCase ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
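# CLI command that rewrites TensorFlow Datasets scripts into Hugging Face Datasets scripts using the regex table above.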
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : ArgumentParser ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = get_logger("datasets-cli/converting" )
_UpperCAmelCase : List[str] = tfds_path
_UpperCAmelCase : str = datasets_directory
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase : Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_UpperCAmelCase : List[Any] = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
_UpperCAmelCase : Tuple = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : int = {}
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase : Dict = os.listdir(lowerCamelCase__ )
else:
_UpperCAmelCase : Union[str, Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
_UpperCAmelCase : int = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : str = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(lowerCamelCase__ , encoding="utf-8" ) as f:
_UpperCAmelCase : Any = f.readlines()
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Any = []
for line in lines:
_UpperCAmelCase : str = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_UpperCAmelCase : List[Any] = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
_UpperCAmelCase : Tuple = ""
continue
elif "from absl import logging" in out_line:
_UpperCAmelCase : Optional[Any] = "from datasets import logging\n"
elif "getLogger" in out_line:
_UpperCAmelCase : str = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_UpperCAmelCase : int = True
                    _UpperCAmelCase : Optional[int] = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" )
out_lines.append(lowerCamelCase__ )
out_lines.append(lowerCamelCase__ )
continue
else:
for pattern, replacement in TO_CONVERT:
_UpperCAmelCase : Dict = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_UpperCAmelCase : Optional[int] = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
_UpperCAmelCase : List[str] = "from . import " + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_UpperCAmelCase : int = True
out_lines.append(lowerCamelCase__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_UpperCAmelCase : List[str] = f_name.replace(".py" , "" )
_UpperCAmelCase : Optional[Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCamelCase__ )
if needs_manual_update:
with_manual_update.append(lowerCamelCase__ )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.writelines(lowerCamelCase__ )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
_UpperCAmelCase : List[Any] = os.path.basename(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(lowerCamelCase__ , lowerCamelCase__ )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
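# Builds a tiny Mask2Former config together with random pixel values, masks and labels for the common model tests below.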
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
            # check that all the required outputs are present
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
'''simple docstring'''
from __future__ import annotations
lowerCamelCase__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
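# Breadth-first search over the adjacency list above; each visited vertex records its parent so shortest paths from the source can be reconstructed.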
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : dict[str, list[str]] , lowerCamelCase__ : str ) ->None:
'''simple docstring'''
_UpperCAmelCase : Dict = graph
        # mapping from each node to its parent in the resulting breadth-first tree
_UpperCAmelCase : dict[str, str | None] = {}
_UpperCAmelCase : List[Any] = source_vertex
def lowerCAmelCase__ ( self : Optional[int] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {self.source_vertex}
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[str] = [self.source_vertex] # first in first out queue
while queue:
_UpperCAmelCase : int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = vertex
queue.append(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
_UpperCAmelCase : int = self.parent.get(lowerCamelCase__ )
if target_vertex_parent is None:
_UpperCAmelCase : Tuple = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(lowerCamelCase__ )
return self.shortest_path(lowerCamelCase__ ) + F"""->{target_vertex}"""
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
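# Convert a byte count to whole mebibytes for readable memory reports.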
def __lowerCAmelCase (__lowerCAmelCase ):
return int(x / 2**20 )
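# Context manager that snapshots CUDA memory on entry and records the allocation delta and peak on exit.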
class lowerCAmelCase__ :
def __enter__( self : int ) ->Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_UpperCAmelCase : Tuple = torch.cuda.memory_allocated()
return self
def __exit__( self : Tuple , *lowerCamelCase__ : str ) ->int:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
_UpperCAmelCase : List[str] = torch.cuda.memory_allocated()
_UpperCAmelCase : Tuple = torch.cuda.max_memory_allocated()
_UpperCAmelCase : List[Any] = bamb(self.end - self.begin )
_UpperCAmelCase : int = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" , __lowerCAmelCase = 320 , __lowerCAmelCase = 160 , ):
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase )
    # We also rename the 'label' column to 'labels', the argument name expected by transformers models
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
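# Training loop: each epoch runs under TorchTracemalloc so its peak GPU memory can be recorded and checked against --peak_memory_upper_bound.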
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[Any] = config["lr"]
_UpperCAmelCase : List[Any] = int(config["num_epochs"] )
_UpperCAmelCase : int = int(config["seed"] )
_UpperCAmelCase : Union[str, Any] = int(config["batch_size"] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , )
else:
_UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase : str = 0
# Now we train the model
_UpperCAmelCase : Optional[Any] = {}
for epoch in range(__lowerCAmelCase , __lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.loss
_UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase ():
_UpperCAmelCase : Any = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , )
_UpperCAmelCase : Tuple = parser.parse_args()
_UpperCAmelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase : List[str] = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase : int = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Tuple = False
lowerCAmelCase : int = False
    def setUp(self):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = TFDPRModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Dict = TFDPRContextEncoder.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : int = TFDPRContextEncoder.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : str = TFDPRQuestionEncoder.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[Any] = TFDPRReader.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : int ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
_UpperCAmelCase : Tuple = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
_UpperCAmelCase : int = model(lowerCamelCase__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCAmelCase : Dict = tf.constant(
[
[
0.0_3_2_3_6_2_5_3,
0.1_2_7_5_3_3_3_5,
0.1_6_8_1_8_5_0_9,
0.0_0_2_7_9_7_8_6,
0.3_8_9_6_9_3_3,
0.2_4_2_6_4_9_4_5,
0.2_1_7_8_9_7_1,
-0.0_2_3_3_5_2_2_7,
-0.0_8_4_8_1_9_5_9,
-0.1_4_3_2_4_1_1_7,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
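
# A quick, hedged illustration (toy key name, assumed purely for demonstration) of
# the regex renaming performed by convert_state_dict above:
assert (
    re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", "blocks.3.att.time_mix_k")
    == "blocks.3.attention.time_mix_k"
)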
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may fail with an OOM error; if this is the case, don't worry, you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
'''simple docstring'''
_UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
        _UpperCAmelCase : str = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.int32 )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0]
_UpperCAmelCase : int = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
_UpperCAmelCase : int = np.array(
            [[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
        _UpperCAmelCase : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.int32 )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
_UpperCAmelCase : str = np.array(
            [[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
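    # Hedged extra checks (the sample matrix is an assumption, not part of the
    # original demo): ReLU is idempotent and preserves the input shape.
    example = np.array([[-2.0, 3.0], [0.5, -0.1]])
    assert np.array_equal(relu(relu(example)), relu(example))
    assert relu(example).shape == example.shape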
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4
print(differentiate(f, 9, 2))
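    # A hedged cross-check (assumed extra example): for g(y) = y**6,
    # g''(9) = 30 * 9**4 = 196830, which differentiate() reproduces exactly.
    assert differentiate(lambda y: y**6, 9, 2) == 30 * 9**4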
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class EMAModel:
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[Any] = "data2vec-vision"
def __init__( self : Any , lowerCamelCase__ : List[str]=7_68 , lowerCamelCase__ : Optional[Any]=12 , lowerCamelCase__ : List[str]=12 , lowerCamelCase__ : Optional[Any]=30_72 , lowerCamelCase__ : Dict="gelu" , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : str=0.0 , lowerCamelCase__ : List[str]=0.0_2 , lowerCamelCase__ : Any=1E-12 , lowerCamelCase__ : Any=2_24 , lowerCamelCase__ : Dict=16 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : str=False , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : int=False , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : str=True , lowerCamelCase__ : int=[3, 5, 7, 11] , lowerCamelCase__ : Optional[Any]=[1, 2, 3, 6] , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Optional[Any]=0.4 , lowerCamelCase__ : Dict=2_56 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Dict=2_55 , **lowerCamelCase__ : List[Any] , ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Optional[Any] = layer_norm_eps
_UpperCAmelCase : Tuple = image_size
_UpperCAmelCase : Optional[Any] = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = use_mask_token
_UpperCAmelCase : List[Any] = use_absolute_position_embeddings
_UpperCAmelCase : Union[str, Any] = use_relative_position_bias
_UpperCAmelCase : Any = use_shared_relative_position_bias
_UpperCAmelCase : List[str] = layer_scale_init_value
_UpperCAmelCase : Dict = drop_path_rate
_UpperCAmelCase : List[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase : Dict = out_indices
_UpperCAmelCase : Any = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase : Union[str, Any] = use_auxiliary_head
_UpperCAmelCase : Optional[int] = auxiliary_loss_weight
_UpperCAmelCase : List[str] = auxiliary_channels
_UpperCAmelCase : List[Any] = auxiliary_num_convs
_UpperCAmelCase : Optional[Any] = auxiliary_concat_input
_UpperCAmelCase : List[Any] = semantic_loss_ignore_index
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = version.parse("1.11" )
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self : Dict ) ->float:
'''simple docstring'''
return 1E-4
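
# Note (a brief sketch; the property defs above keep the corpus' placeholder names):
# the 1e-4 returned last plays the role of `atol_for_validation`, the absolute
# tolerance used when comparing ONNX-exported outputs against the PyTorch model.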
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
            'Extracting some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}
    # Student keys follow the DistilBERT parameter layout (q_lin/k_lin/v_lin, ffn.lin1/lin2, ...)
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[F'''distilbert.embeddings.{w}.weight'''] = state_dict[F'''{prefix}.embeddings.{w}.weight''']
    for w in ["weight", "bias"]:
        compressed_sd[F'''distilbert.embeddings.LayerNorm.{w}'''] = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
            ]
        std_idx += 1
    compressed_sd['vocab_projector.weight'] = state_dict['cls.predictions.decoder.weight']
    compressed_sd['vocab_projector.bias'] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[F'''vocab_transform.{w}'''] = state_dict[F'''cls.predictions.transform.dense.{w}''']
            compressed_sd[F'''vocab_layer_norm.{w}'''] = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
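
# Layer selection note (a sketch): copying teacher layers [0, 2, 4, 7, 9, 11] turns
# a 12-layer BERT into a 6-layer student, the usual DistilBERT-style initialization,
# so the script prints "N layers selected for distillation: 6".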
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(F"""### Benchmark: {benchmark_file_name}""")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = F""" {new_val:f}""" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += F""" / {old_val:f}""" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += F""" ({dif_val:f})""" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
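
# Illustrative output for a single benchmark with one metric (values assumed):
#
#     ### Benchmark: benchmark_map.json
#     | metric | load_time |
#     |--------|---|
#     | new / old (diff) |  1.200000 / 1.500000 (-0.300000) |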
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + F"""->{target_vertex}"""
if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
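    # A hedged extra check (path derived by hand from the sample graph above):
    assert g.shortest_path('D') == 'G->C->A->B->D'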
print(g.shortest_path('Foo'))
'''simple docstring'''
def print_max_activities(start, finish):
    """
    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
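
# A brief note (sketch): the greedy step above is optimal only when the activities
# are already sorted by finish time; for the sample input below it prints the
# selected indices 0,1,3,4,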
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = ["image_processor", "tokenizer"]
lowerCAmelCase : List[Any] = "BlipImageProcessor"
lowerCAmelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
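
# Usage sketch (hypothetical objects; `processor` is assumed to be an instance of
# the class above, wrapping a BlipImageProcessor and an AutoTokenizer):
#
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # -> pixel_values from the image processor plus input_ids/attention_mask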
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = F"""Matrix consists of {self.row} rows and {self.column} columns\n"""

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = F"""%{max_element_length}s"""

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = F"""Unsupported type given for another ({type(another)})"""
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
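
# Identity used above (a sketch): given a known A^(-1),
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# valid whenever the scalar 1 + v^T A^(-1) u is non-zero.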
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(F"""a^(-1) is {ainv}""")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F"""u is {u}""")
        print(F"""v is {v}""")
        print(F"""uv^T is {u * v.transpose()}""")
        # Sherman Morrison
        print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}""")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
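# Optional cross-check of the output above, assuming networkx is installed
# (the sample graph has articulation points 2, 3 and 5):
try:
    import networkx as nx

    graph = nx.Graph((node, nbr) for node, nbrs in data.items() for nbr in nbrs)
    assert sorted(nx.articulation_points(graph)) == [2, 3, 5]
except ImportError:
    pass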
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
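# A minimal usage sketch, assuming a transformers release that ships Dinat:
#
#     from transformers import DinatConfig, DinatModel
#
#     config = DinatConfig()  # dinat-mini style defaults
#     model = DinatModel(config)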
'''simple docstring'''
def solution():
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
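    # The same ten trailing digits can be computed without materialising the
    # multi-thousand-digit sum, by reducing every self power modulo 10**10
    # with Python's three-argument pow (an illustrative cross-check):
    mod = 10**10
    assert str(sum(pow(i, i, mod) for i in range(1, 1_001)) % mod).zfill(10) == solution()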
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
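    # A minimal end-to-end sketch of the exchange with the class above:
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b  # both parties derive the same SHA-256 digest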
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a, input_b):
    """Euclidean distance between two vectors of equal length."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset, value_array):
    """For every vector in value_array, find the nearest vector in dataset
    (by Euclidean distance) and return [nearest_vector, distance] pairs."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a, input_b):
    """Cosine similarity: dot(a, b) / (|a| * |b|)."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
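    # A quick usage sketch with toy data:
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.0, 1.0]])
    print(similarity_search(dataset, value_array))  # [[[0.0, 0.0], 1.0]]
    print(cosine_similarity(np.array([1.0, 2.0]), np.array([6.0, 32.0])))  # ~0.9615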
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
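# A minimal usage sketch, assuming a transformers release that ships UperNet:
#
#     from transformers import UperNetConfig, UperNetForSemanticSegmentation
#
#     config = UperNetConfig()  # falls back to the default ResNet backbone
#     model = UperNetForSemanticSegmentation(config)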
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n    Output class for the scheduler\'s step function output.\n\n    Args:\n        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n            denoising loop.\n        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n            `pred_original_sample` can be used to preview progress or for guidance.\n    \"""\n\n    prev_sample: torch.FloatTensor\n    pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        code = black.format_str(code, mode=black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119))
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification


def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])

    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21_841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
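# Example invocation (the script filename below is illustrative; timm and
# transformers must both be installed):
#
#     python convert_swinv2_timm_to_pytorch.py \
#         --swinv2_name swinv2_tiny_patch4_window8_256 \
#         --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256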
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
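    # A quick forward-mode check with the class above:
    # d/dx (x**3 + 2*x) at x = 2 is 3*(2**2) + 2 = 14.
    print(differentiate(lambda x: x**3 + 2 * x, 2.0, 1))  # 14.0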
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowerCamelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
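# Example: run the suite with the per-test reports that the hook above
# enables (the report id is illustrative):
#
#     python -m pytest tests/ --make-reports=my_test_run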
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length: Optional[int], truncation: str, padding: str) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
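# A minimal usage sketch, assuming a transformers release that ships CLAP:
#
#     import numpy as np
#     from transformers import ClapFeatureExtractor
#
#     extractor = ClapFeatureExtractor()
#     audio = np.zeros(48_000)  # one second of silence at 48 kHz
#     features = extractor(audio, sampling_rate=48_000, return_tensors="pt")
#     print(features["input_features"].shape, features["is_longer"])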
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[int]=4 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_attention_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_choices
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    # precompute the factorials 1!, 2!, ..., (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
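# Sanity checks (these follow from the definition above, not from the original
# file): the 0-th permutation of range(3) is the identity and the last
# (k = 3! - 1 = 5) is the full reversal.
#   >>> kth_permutation(0, 3)
#   [0, 1, 2]
#   >>> kth_permutation(5, 3)
#   [2, 1, 0]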
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2_048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
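    # Example of the offset logic above (ids are illustrative): an spm piece
    # with id 3 maps to fairseq id 3 + fairseq_offset == 4, while ids 0-3 stay
    # pinned to <s>, <pad>, </s>, <unk> via fairseq_tokens_to_ids.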
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens) -> str:
        '''simple docstring'''
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
'''simple docstring'''
import os
def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
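# Note (not from the original file): with the standard Project Euler problem 13
# input (one hundred 50-digit numbers), the expected return value is "5537376230".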
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers in place using LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
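# Worked example (digits are processed least-significant first):
#   >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#   [2, 24, 45, 66, 75, 90, 170, 802]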
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        '''simple docstring'''
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        '''simple docstring'''
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    def __init__(self, start_length, eof_strings, tokenizer):
        '''simple docstring'''
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        '''simple docstring'''
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of generated code that starts with one of EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation.")
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = F"""check({human_eval['test'][task]['entry_point']})"""
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers)
        print(F"""Results: {pass_at_k}""")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Check whether n uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")
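# Spot checks for the predicate above:
#   >>> is_9_pandigital(123456789)
#   True
#   >>> is_9_pandigital(112345678)  # repeated digit, missing 9
#   False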
def solution() -> int | None:
    """Return the largest 1-9 pandigital number formed as a concatenated product."""
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
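# Usage sketch (repo and file names are illustrative only, not from this file):
#   url = hf_hub_url("user/my_dataset", "data/train-00000-of-00001.parquet", revision="main")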
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
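# Both implementations agree; for example:
#   >>> euclidean_distance([1, 2, 3], [4, 5, 6])
#   5.196152422706632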
if __name__ == "__main__":
    def benchmark():
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10_000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10_000 , globals=globals() , ) )
benchmark()
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        '''simple docstring'''
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(F"""backbone {config.backbone} is not supported by timm.""")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        '''simple docstring'''
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
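# Usage sketch (backbone name and indices are illustrative, not from this file):
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps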
'''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    """Brick sort: alternately compare (even, odd)-indexed neighbour pairs."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
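# n passes over alternating pairs are enough to sort n elements:
#   >>> odd_even_transposition([5, 4, 3, 2, 1])
#   [1, 2, 3, 4, 5]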
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    def __post_init__(self):
        '''simple docstring'''
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(self, args: GlueDataTrainingArguments, tokenizer: PreTrainedTokenizerBase, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, cache_dir: Optional[str] = None, ) -> None:
        '''simple docstring'''
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py", FutureWarning, )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""", )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""", time.time() - start)
            else:
                logger.info(F"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i) -> InputFeatures:
        '''simple docstring'''
        return self.features[i]
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
'''simple docstring'''
return self.label_list
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        '''simple docstring'''
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
    @cached_property
    def model_checkpoints(self):
        '''simple docstring'''
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=1e-4)
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=1e-4
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=1e-4
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=1e-4))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=1e-4))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
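

# A minimal sketch (added for illustration, not part of the original test file)
# of how the two logit tensors checked above combine into a semantic
# segmentation map; the einsum is the standard Mask2Former recipe, and
# `outputs` is assumed to come from Mask2FormerForUniversalSegmentation.
def semantic_map_sketch(outputs):
    class_probs = outputs.class_queries_logits.softmax(dim=-1)[..., :-1]  # drop the "no object" class
    mask_probs = outputs.masks_queries_logits.sigmoid()
    # weight each query's mask by its class probabilities, then sum over queries
    segmentation = torch.einsum("bqc,bqhw->bchw", class_probs, mask_probs)
    return segmentation.argmax(dim=1)  # (batch, height, width) label map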
| 40
| 1
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # (n is fixed at 10 here, matching the 10-element list built in main)
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))

    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
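

# For reference, a single-process version of the same algorithm (a sketch added
# for clarity, not part of the original script): each pass compares odd/even
# neighbor pairs, which is exactly what the pipe-connected processes above do.
def odd_even_transposition_sequential(arr):
    n = len(arr)
    for i in range(n):
        # even passes compare (0,1), (2,3), ...; odd passes compare (1,2), (3,4), ...
        for j in range(i % 2, n - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))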
| 40
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    # bytes -> megabytes
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
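

# A standalone sketch (added for illustration, requires a CUDA device) of how
# the TorchTracemalloc context manager above is meant to be used outside the
# training loop; call it manually.
def _tracemalloc_demo():
    with TorchTracemalloc() as tm:
        x = torch.randn(1024, 1024, device="cuda")  # roughly 4 MB of float32
        del x
    print(f"used: {tm.used} MB, peaked: {tm.peaked} MB")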
| 40
| 1
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
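

# A minimal usage sketch (added for illustration; "my_audio" is a placeholder
# path): `load_dataset("audiofolder", ...)` dispatches to the builder above,
# and class-folder names such as my_audio/train/dog/*.wav become the "label"
# column.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("audiofolder", data_dir="my_audio")
    print(ds["train"][0])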
| 40
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state dict).
    print(
        "Cleaning up shards. This may raise an OOM error; if it does, don't worry, you have still converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
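

# A toy sketch (added for illustration, not part of the conversion script) of
# the renaming rules in convert_state_dict above; call it manually to see the
# key mapping on fabricated RWKV keys.
def _demo_convert_state_dict():
    demo = {"emb.weight": None, "blocks.2.att.time_mix_k": None, "head.weight": None}
    # expected: ['rwkv.embeddings.weight', 'rwkv.blocks.2.attention.time_mix_key', 'head.weight']
    print(list(convert_state_dict(demo).keys()))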
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 40
| 1
|
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
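

# A short sketch (added for illustration) of what the "WithTeacher" variant
# tested above returns: its final logits are the average of the class-token
# head and the distillation-token head.
def deit_predict_sketch(model, image_processor, image):
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # outputs.logits == (outputs.cls_logits + outputs.distillation_logits) / 2
    return outputs.logits.argmax(-1).item()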
| 40
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
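

# A small companion sketch (added for illustration): the ReLU subgradient,
# 1 where the input is positive and 0 elsewhere, which is what backpropagation
# uses for this activation.
def relu_derivative(vector):
    return (np.asarray(vector) > 0).astype(float)


# relu_derivative([-1, 0, 5]) --> [0. 0. 1.]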
| 40
| 1
|
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
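

# A small sketch (added for illustration) of the brevity penalty folded into
# the score above: BP is 1 when the candidate is longer than the reference,
# and exp(1 - reference_length / translation_length) otherwise.
def brevity_penalty_sketch(translation_length: int, reference_length: int) -> float:
    import math

    if translation_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / translation_length)


# brevity_penalty_sketch(9, 10) --> exp(1 - 10/9), roughly 0.8948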
| 40
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """
        Compute the decay factor for the exponential moving average.
        """
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Copy the current averaged parameters into the given collection of parameters.
        """
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """
        Move the internal buffers of the ExponentialMovingAverage to `device`.
        """
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """
        Returns the state of the ExponentialMovingAverage as a dict.
        """
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Save the current parameters for restoring later.
        """
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Restore the parameters stored with the `store` method.
        """
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """
        Loads the ExponentialMovingAverage state.
        """
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 40
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
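

# A minimal usage sketch (added for illustration): build a small randomly
# initialized model from this config.
#
# from transformers import BioGptConfig, BioGptModel
# config = BioGptConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)  # a tiny variant
# model = BioGptModel(config)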
| 40
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
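
    # For clarity (an added sketch, not in the original script): the
    # teacher-to-student layer map implied by the extraction loop above; six of
    # BERT's twelve layers are kept.
    for std_idx_, teacher_idx_ in enumerate([0, 2, 4, 7, 9, 11]):
        print(f"teacher layer {teacher_idx_:2d} -> student layer {std_idx_}")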
| 40
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
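
    # A worked example (added sketch): the query [0, 0, 0] is nearest to
    # [0, 0, 1], at Euclidean distance 1.0.
    dataset = np.array([[0, 0, 1], [1, 0, 0]])
    value_array = np.array([[0, 0, 0]])
    print(similarity_search(dataset, value_array))  # --> [[[0, 0, 1], 1.0]]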
| 40
|
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))  # raises ValueError: no path from G to Foo
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
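    # Usage sketch: a 100 VA load at power factor 0.8 draws 80 W of real power
    # and about 60 var of reactive power, since sqrt(1 - 0.8**2) = 0.6.
    print(real_power(100, 0.8))      # 80.0
    print(reactive_power(100, 0.8))  # ~60.0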
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) ->BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
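# A hedged usage sketch (the class above matches the BLIP-2 processor layout;
# the checkpoint name is illustrative only):
#
#     from PIL import Image
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")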
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
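# Usage note (a sketch of the lazy-import behaviour): once the module object is
# replaced above, `from transformers import MraModel` resolves the attribute on
# first access and only then imports the torch-dependent modeling code.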
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
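    # Usage sketch (traced by hand): one longest non-decreasing subsequence.
    print(longest_subsequence([1, 3, 2, 4]))  # [1, 2, 4]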
'''simple docstring'''
def solution():
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
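    # Usage sketch: 24 October 2020 fell on a Saturday.
    print(get_week_day(2020, 10, 24))  # Saturday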
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig ):
    model_type = "unispeech"
def __init__( self : List[str] , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : List[str]=7_68 , lowerCamelCase__ : str=12 , lowerCamelCase__ : int=12 , lowerCamelCase__ : int=30_72 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=0.0 , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : List[str]=0.0_2 , lowerCamelCase__ : Tuple=1E-5 , lowerCamelCase__ : Optional[int]="group" , lowerCamelCase__ : List[str]="gelu" , lowerCamelCase__ : Optional[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase__ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase__ : List[Any]=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase__ : int=False , lowerCamelCase__ : Union[str, Any]=1_28 , lowerCamelCase__ : List[Any]=16 , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : List[Any]=0.0_5 , lowerCamelCase__ : Dict=10 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Tuple=10 , lowerCamelCase__ : Optional[Any]=0 , lowerCamelCase__ : str=3_20 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Optional[int]=1_00 , lowerCamelCase__ : Optional[Any]=2_56 , lowerCamelCase__ : Any=2_56 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : str="mean" , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : str=2_56 , lowerCamelCase__ : List[str]=80 , lowerCamelCase__ : Tuple=0 , lowerCamelCase__ : Optional[Any]=1 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : str=0.5 , **lowerCamelCase__ : Union[str, Any] , ) ->Any:
'''simple docstring'''
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Any = feat_extract_norm
_UpperCAmelCase : Optional[int] = feat_extract_activation
_UpperCAmelCase : Tuple = list(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Tuple = list(lowerCamelCase__ )
_UpperCAmelCase : List[str] = conv_bias
_UpperCAmelCase : int = num_conv_pos_embeddings
_UpperCAmelCase : Dict = num_conv_pos_embedding_groups
_UpperCAmelCase : Union[str, Any] = len(self.conv_dim )
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Optional[int] = num_attention_heads
_UpperCAmelCase : Dict = hidden_dropout
_UpperCAmelCase : List[str] = attention_dropout
_UpperCAmelCase : int = activation_dropout
_UpperCAmelCase : Union[str, Any] = feat_proj_dropout
_UpperCAmelCase : List[Any] = final_dropout
_UpperCAmelCase : List[str] = layerdrop
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : int = num_ctc_classes
_UpperCAmelCase : Tuple = vocab_size
_UpperCAmelCase : List[str] = do_stable_layer_norm
_UpperCAmelCase : Optional[Any] = use_weighted_layer_sum
_UpperCAmelCase : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase : List[str] = apply_spec_augment
_UpperCAmelCase : int = mask_time_prob
_UpperCAmelCase : Union[str, Any] = mask_time_length
_UpperCAmelCase : str = mask_time_min_masks
_UpperCAmelCase : Dict = mask_feature_prob
_UpperCAmelCase : Any = mask_feature_length
_UpperCAmelCase : Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase : List[str] = num_codevectors_per_group
_UpperCAmelCase : str = num_codevector_groups
_UpperCAmelCase : Optional[Any] = contrastive_logits_temperature
_UpperCAmelCase : Union[str, Any] = feat_quantizer_dropout
_UpperCAmelCase : Any = num_negatives
_UpperCAmelCase : str = codevector_dim
_UpperCAmelCase : List[str] = proj_codevector_dim
_UpperCAmelCase : Dict = diversity_loss_weight
# ctc loss
_UpperCAmelCase : Optional[int] = ctc_loss_reduction
_UpperCAmelCase : Tuple = ctc_zero_infinity
# pretraining loss
_UpperCAmelCase : List[str] = replace_prob
@property
def lowerCAmelCase__ ( self : str ) ->Union[str, Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
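# A hedged usage note (assuming the standard UniSpeech configuration): the
# property above multiplies the convolutional strides, so with the default
# conv_stride=(5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples raw audio
# by 5 * 2**6 = 320 samples per output frame.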
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
_UpperCAmelCase : Optional[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowerCamelCase__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any=None ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        _UpperCAmelCase : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
_UpperCAmelCase : Tuple = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowerCamelCase__ , "w" , newline="\n" ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
# Copy consistency with a really long name
_UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowerCamelCase__ , overwrite_result=re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Optional[str] = None ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Any = (
os.path.join(lowerCamelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_UpperCAmelCase : Tuple = Extractor
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
_UpperCAmelCase : Dict = os.path.abspath(lowerCamelCase__ )
return os.path.join(self.extract_dir , hash_url_to_filename(lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : str , lowerCamelCase__ : bool ) ->bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(lowerCamelCase__ ) and not (os.path.isdir(lowerCamelCase__ ) and os.listdir(lowerCamelCase__ ))
)
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : bool = False ) ->str:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.extractor.infer_extractor_format(lowerCamelCase__ )
if not extractor_format:
return input_path
_UpperCAmelCase : List[Any] = self._get_output_path(lowerCamelCase__ )
if self._do_extract(lowerCamelCase__ , lowerCamelCase__ ):
self.extractor.extract(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return output_path
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@classmethod
@abstractmethod
def lowerCAmelCase__ ( cls : List[Any] , lowerCamelCase__ : Union[Path, str] , **lowerCamelCase__ : Optional[Any] ) ->bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
...
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : List[bytes] = []
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : int ) ->Any:
'''simple docstring'''
with open(lowerCamelCase__ , "rb" ) as f:
return f.read(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : bytes = b"" ) ->bool:
'''simple docstring'''
if not magic_number:
_UpperCAmelCase : Dict = max(len(lowerCamelCase__ ) for cls_magic_number in cls.magic_numbers )
try:
_UpperCAmelCase : Dict = cls.read_magic_number(lowerCamelCase__ , lowerCamelCase__ )
except OSError:
return False
return any(magic_number.startswith(lowerCamelCase__ ) for cls_magic_number in cls.magic_numbers )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : Union[Path, str] , **lowerCamelCase__ : Any ) ->bool:
'''simple docstring'''
return tarfile.is_tarfile(lowerCamelCase__ )
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] ) ->Optional[int]:
'''simple docstring'''
def resolved(lowerCamelCase__ : str ) -> str:
return os.path.realpath(os.path.abspath(lowerCamelCase__ ) )
def badpath(lowerCamelCase__ : str , lowerCamelCase__ : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) ).startswith(lowerCamelCase__ )
def badlink(lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_UpperCAmelCase : List[Any] = resolved(os.path.join(lowerCamelCase__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowerCamelCase__ )
_UpperCAmelCase : int = resolved(lowerCamelCase__ )
for finfo in members:
if badpath(finfo.name , lowerCamelCase__ ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(lowerCamelCase__ , lowerCamelCase__ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(lowerCamelCase__ , lowerCamelCase__ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
_UpperCAmelCase : Any = tarfile.open(lowerCamelCase__ )
tar_file.extractall(lowerCamelCase__ , members=TarExtractor.safemembers(lowerCamelCase__ , lowerCamelCase__ ) )
tar_file.close()
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = [b"\x1F\x8B"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
with gzip.open(lowerCamelCase__ , "rb" ) as gzip_file:
with open(lowerCamelCase__ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[Any] = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def lowerCAmelCase__ ( cls : str , lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : bytes = b"" ) ->bool:
'''simple docstring'''
if super().is_extractable(lowerCamelCase__ , magic_number=lowerCamelCase__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowerCamelCase__ , "rb" ) as fp:
_UpperCAmelCase : List[Any] = _EndRecData(lowerCamelCase__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_UpperCAmelCase : Tuple = fp.read(lowerCamelCase__ ) # CD is where we expect it to be
if len(lowerCamelCase__ ) == sizeCentralDir:
_UpperCAmelCase : List[str] = struct.unpack(lowerCamelCase__ , lowerCamelCase__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
with zipfile.ZipFile(lowerCamelCase__ , "r" ) as zip_file:
zip_file.extractall(lowerCamelCase__ )
zip_file.close()
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[str] = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
with lzma.open(lowerCamelCase__ ) as compressed_file:
with open(lowerCamelCase__ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : str = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
_UpperCAmelCase : int = rarfile.RarFile(lowerCamelCase__ )
rf.extractall(lowerCamelCase__ )
rf.close()
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
_UpperCAmelCase : Tuple = zstd.ZstdDecompressor()
with open(lowerCamelCase__ , "rb" ) as ifh, open(lowerCamelCase__ , "wb" ) as ofh:
dctx.copy_stream(lowerCamelCase__ , lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : str = [b"\x42\x5A\x68"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
        with bz2.open(lowerCamelCase__ , "rb" ) as compressed_file:
with open(lowerCamelCase__ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[str] = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
        import py7zr
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
        with py7zr.SevenZipFile(lowerCamelCase__ , "r" ) as archive:
archive.extractall(lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[Any] = [b"\x04\x22\x4D\x18"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
        import lz4.frame
        with lz4.frame.open(lowerCamelCase__ , "rb" ) as compressed_file:
with open(lowerCamelCase__ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
class lowerCAmelCase__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
lowerCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCAmelCase__ ( cls : str ) ->Dict:
'''simple docstring'''
return max(
len(lowerCamelCase__ )
for extractor in cls.extractors.values()
if issubclass(lowerCamelCase__ , lowerCamelCase__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : int ) ->Any:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(lowerCamelCase__ , magic_number_length=lowerCamelCase__ )
except OSError:
return b""
@classmethod
def lowerCAmelCase__ ( cls : Tuple , lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : bool = False ) ->bool:
'''simple docstring'''
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=lowerCamelCase__ , )
_UpperCAmelCase : Tuple = cls.infer_extractor_format(lowerCamelCase__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCAmelCase__ ( cls : Dict , lowerCamelCase__ : Union[Path, str] ) ->str: # <Added version="2.4.0"/>
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = cls._get_magic_number_max_length()
_UpperCAmelCase : Optional[Any] = cls._read_magic_number(lowerCamelCase__ , lowerCamelCase__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowerCamelCase__ , magic_number=lowerCamelCase__ ):
return extractor_format
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[BaseExtractor] = "deprecated" , ) ->None:
'''simple docstring'''
os.makedirs(os.path.dirname(lowerCamelCase__ ) , exist_ok=lowerCamelCase__ )
# Prevent parallel extractions
_UpperCAmelCase : List[str] = str(Path(lowerCamelCase__ ).with_suffix(".lock" ) )
with FileLock(lowerCamelCase__ ):
shutil.rmtree(lowerCamelCase__ , ignore_errors=lowerCamelCase__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowerCamelCase__ , lowerCamelCase__ ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=lowerCamelCase__ , )
_UpperCAmelCase : Optional[Any] = extractor if extractor != "deprecated" else extractor_format
else:
_UpperCAmelCase : List[str] = cls.extractors[extractor_format]
return extractor.extract(lowerCamelCase__ , lowerCamelCase__ )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=lowerCamelCase__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowerCamelCase__ ):
return extractor.extract(lowerCamelCase__ , lowerCamelCase__ )
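# A hedged usage sketch (paths are hypothetical, and it assumes the registry
# class above is exposed as `Extractor`, as in the datasets library):
#
#     fmt = Extractor.infer_extractor_format("/tmp/archive.tar.gz")   # e.g. "gzip" or "tar"
#     Extractor.extract("/tmp/archive.tar.gz", "/tmp/extracted", extractor_format=fmt)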
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))  # 196830, i.e. d^2/dy^2 (y**6) = 30 * y**4 at y = 9
'''simple docstring'''
lowerCamelCase__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase ):
    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
processor.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown(self ):
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_additional_features(self ):
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
    def test_tokenizer_decode(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self ):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
    @slow
    def test_model_from_pretrained(self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_masked_lm(self ):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_no_head(self ):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256_047
RO_CODE = 256_145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self ):
        super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(lowerCamelCase__, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def lowerCAmelCase__ ( self : Any ) ->Tuple:
'''simple docstring'''
pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")
                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
                self.assertTrue(special_token_id in r_output)
                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")
                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256_002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256_057)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256_047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256_203, 3])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, eng_Latn
                "input_ids": [[256_047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
                "forced_bos_token_id": 256_057,
            },
        )
@require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256_047]
        )
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256_047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
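# Usage sketch (not part of the test suite): how the language codes exercised
# above are typically used for translation. The checkpoint name is real; the
# helper name `translate_en_to_ro` is hypothetical and assumes hub access.
def translate_en_to_ro(text):
    from transformers import AutoModelForSeq2SeqLM, NllbTokenizer
    tokenizer = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
    )
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
    inputs = tokenizer(text, return_tensors="pt")
    # Force the decoder to start generating in the target language.
    generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ron_Latn"])
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]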
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
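# The pattern above defers heavy imports until an attribute is first accessed.
# Below is a minimal sketch of the idea behind _LazyModule (simplified and
# hypothetical; the real class in transformers.utils also handles submodules,
# __dir__, pickling, etc.):
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        # import the submodule only now, on first access
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)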
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
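# A quick equivalence check against an exhaustive reference (a sketch added
# for illustration; `_brute_force` is hypothetical and not part of the module):
def _brute_force(nums):
    from itertools import compress, product
    best = 0
    for mask in product([0, 1], repeat=len(nums)):
        if any(a and b for a, b in zip(mask, mask[1:])):
            continue  # adjacent picks are not allowed
        best = max(best, sum(compress(nums, mask)))
    return best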
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt n_copies times."""
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom StoppingCriteria which checks whether all generations in the batch are complete."""
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
        """Return True once every generated sequence contains an end-of-function string."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Truncate a generation at the first EOF marker, keeping only the first code block."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset using multiple GPUs with accelerate."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
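# For reference, the unbiased pass@k estimator from the Codex paper
# (Chen et al., 2021), which the code_eval metric reports. This is a sketch
# added for illustration; `estimate_pass_at_k` is a hypothetical helper name.
import numpy as np
def estimate_pass_at_k(n, c, k):
    """n: total generations per task, c: number that passed, k: the k in pass@k."""
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))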
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
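# For intuition, map_nested applies a function throughout nested lists/dicts.
# A minimal pure-Python sketch of the idea (hypothetical helper; it ignores
# num_proc and the many options the real datasets map_nested supports):
def _map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {k: _map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, list):
        return [_map_nested_sketch(fn, v) for v in data]
    return fn(data)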
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
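# Inference sketch mirroring the integration test above (assumes network
# access; the pipeline helper is a real transformers API):
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="microsoft/focalnet-tiny")
#   classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   # top prediction: ImageNet class 281 ("tabby, tabby cat")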
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
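# The returned URL follows huggingface_hub's dataset layout (a sketch of the
# shape, not a guaranteed format):
#   https://huggingface.co/datasets/<repo_id>/resolve/<revision>/<path>
# e.g. hf_hub_url("squad", "README.md") for the default revision.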
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
lowerCamelCase__ = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
lowerCamelCase__ = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
lowerCamelCase__ = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
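# Worked example for the helpers above (values checked by hand):
#   preds  = np.array([0, 1, 1, 0])
#   labels = np.array([0, 1, 0, 0])
#   simple_accuracy(preds, labels) -> 0.75        (3 of 4 correct)
#   acc_and_f1(preds, labels)      -> {"accuracy": 0.75, "f1": 2/3}
#     (precision 0.5, recall 1.0, so F1 = 2 * 0.5 * 1.0 / 1.5)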
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        """Empty init weights function to ensure compatibility of the class in the library."""
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
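# Usage sketch (assumes torch and timm are installed; "resnet18" is an
# illustrative timm model name and the tensor shape is arbitrary):
#
#   import torch
#   from transformers import TimmBackbone
#   backbone = TimmBackbone.from_pretrained("resnet18", use_pretrained_backbone=False)
#   outputs = backbone(torch.randn(1, 3, 224, 224))
#   [fm.shape for fm in outputs.feature_maps]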
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : int = len(__lowerCAmelCase )
for i in range(__lowerCAmelCase ):
_UpperCAmelCase : float = -1
for j in range(i + 1 , __lowerCAmelCase ):
if arr[i] < arr[j]:
_UpperCAmelCase : List[str] = arr[j]
break
result.append(__lowerCAmelCase )
return result
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[Any] = []
for i, outer in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_UpperCAmelCase : Optional[Any] = inner
break
result.append(__lowerCAmelCase )
return result
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Dict = len(__lowerCAmelCase )
_UpperCAmelCase : list[float] = []
_UpperCAmelCase : list[float] = [-1] * arr_size
for index in reversed(range(__lowerCAmelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_UpperCAmelCase : Dict = stack[-1]
stack.append(arr[index] )
return result
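# A quick worked check of the stack-based variant above (names follow the demo
# block below): scanning [2, 7, 3, 5] from the right pops every candidate
# <= arr[index] before reading the answer, so each element is pushed and popped
# at most once, i.e. O(n) overall versus O(n^2) for the nested-loop variants.
# next_greatest_element([2, 7, 3, 5]) # --> [7, -1, 5, -1]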
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCamelCase__ = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
'     next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
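# A note on the lazy-import pattern above: replacing this module in sys.modules
# with a _LazyModule means that importing a symbol such as MraModel from this
# package only triggers the heavy torch-dependent import on first attribute
# access.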
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
if not all(char in "01" for char in bin_string ):
raise ValueError("Non-binary value was passed to the function" )
if not bin_string:
raise ValueError("Empty string was passed to the function" )
_UpperCAmelCase : List[Any] = ""
while len(__lowerCAmelCase ) % 3 != 0:
_UpperCAmelCase : Tuple = "0" + bin_string
_UpperCAmelCase : Dict = [
bin_string[index : index + 3]
for index in range(len(__lowerCAmelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
_UpperCAmelCase : int = 0
for index, val in enumerate(__lowerCAmelCase ):
oct_val += int(2 ** (2 - index) * int(__lowerCAmelCase ) )
oct_string += str(__lowerCAmelCase )
return oct_string
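# A minimal worked example of the grouping logic above (standalone, since the
# converter's own name is not visible here): "101010" splits into the 3-bit
# groups ["101", "010"], and each group maps to one octal digit via
# sum(2 ** (2 - i) * bit), so 0b101010 (decimal 42) becomes "52" (0o52 == 42).
_demo_groups = ["101", "010"]
_demo_digits = [sum(2 ** (2 - i) * int(b) for i, b in enumerate(g)) for g in _demo_groups]
assert "".join(str(d) for d in _demo_digits) == "52"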
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
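# In short, the checks above pin down the segmentation head's output contract:
# masks_queries_logits has shape (batch, num_queries, height // 4, width // 4)
# because of the encoder compression, and class_queries_logits has shape
# (batch, num_queries, num_labels + 1), the extra slot being the null class.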
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __lowerCAmelCase ():
_UpperCAmelCase : Optional[Any] = torch.nn.Linear(2 , 4 )
_UpperCAmelCase : List[str] = torch.optim.AdamW(model.parameters() , lr=1.0 )
_UpperCAmelCase : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(__lowerCAmelCase , max_lr=0.0_1 , steps_per_epoch=2 , epochs=1 )
_UpperCAmelCase : Dict = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
_UpperCAmelCase : str = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __lowerCAmelCase (__lowerCAmelCase ):
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
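# get_signature above collapses a model's parameters into one scalar (the sum of
# absolute weights and biases); comparing signatures before and after a
# checkpoint round trip is enough to tell, within 1e-3, whether the same
# parameters were restored.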
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(__lowerCAmelCase )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@require_cuda
def lowerCAmelCase__ ( self : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowerCamelCase__ ):
_UpperCAmelCase : int = Accelerator(cpu=lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = Accelerator()
_UpperCAmelCase : List[Any] = GradientState()
assert state.num_steps == 1
_UpperCAmelCase : Optional[Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
_UpperCAmelCase : List[str] = False
assert state.sync_gradients is False
GradientState._reset_state()
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = create_components()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = accelerator.prepare(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = create_components()
accelerator.prepare(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def lowerCAmelCase__ ( self : int ) ->Optional[Any]:
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ):
pass
with patch("torch.cuda.set_device" , lowerCamelCase__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
_UpperCAmelCase : Optional[int] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = create_components()
accelerator.prepare(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Dict = get_signature(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCamelCase__ )
# make sure random weights don't match
load_random_weights(lowerCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase__ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(lowerCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase__ ) ) < 1E-3 )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = create_components()
accelerator.prepare(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Dict = get_signature(lowerCamelCase__ )
# saving hook
def save_config(lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Dict ):
_UpperCAmelCase : Union[str, Any] = {"class_name": models[0].__class__.__name__}
with open(os.path.join(lowerCamelCase__ , "data.json" ) , "w" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
# loading hook
def load_config(lowerCamelCase__ : int , lowerCamelCase__ : int ):
with open(os.path.join(lowerCamelCase__ , "data.json" ) , "r" ) as f:
_UpperCAmelCase : List[str] = json.load(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = config["class_name"]
_UpperCAmelCase : List[Any] = accelerator.register_save_state_pre_hook(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = accelerator.register_load_state_pre_hook(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCamelCase__ )
# make sure random weights don't match with hooks
load_random_weights(lowerCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase__ ) ) > 1E-3 )
# random class name to verify correct one is loaded
_UpperCAmelCase : List[Any] = "random"
# make sure loaded weights match with hooks
accelerator.load_state(lowerCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase__ ) ) < 1E-3 )
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCamelCase__ )
# make sure random weights don't match with hooks removed
load_random_weights(lowerCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase__ ) ) > 1E-3 )
# random class name to verify correct one is loaded
_UpperCAmelCase : int = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(lowerCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase__ ) ) < 1E-3 )
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = create_components()
_UpperCAmelCase : List[str] = None
# This should work
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertTrue(dummy_obj is None )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = create_components()
_UpperCAmelCase : List[Any] = [1, 2, 3]
# This should work
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(
getattr(lowerCamelCase__ , "_is_accelerate_prepared" , lowerCamelCase__ ) , lowerCamelCase__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(lowerCamelCase__ , "_is_accelerate_prepared" , lowerCamelCase__ ) , lowerCamelCase__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase__ , "_is_accelerate_prepared" , lowerCamelCase__ ) , lowerCamelCase__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase__ , "_is_accelerate_prepared" , lowerCamelCase__ ) , lowerCamelCase__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase__ , "_is_accelerate_prepared" , lowerCamelCase__ ) , lowerCamelCase__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase__ , "_is_accelerate_prepared" , lowerCamelCase__ ) , lowerCamelCase__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
_UpperCAmelCase : Tuple = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCamelCase__ , device_map={"": 0} , )
_UpperCAmelCase : List[str] = Accelerator()
# This should work
_UpperCAmelCase : Optional[int] = accelerator.prepare(lowerCamelCase__ )
@slow
@require_bnb
def lowerCAmelCase__ ( self : int ) ->Tuple:
'''simple docstring'''
from transformers import AutoModelForCausalLM
_UpperCAmelCase : Optional[Any] = Accelerator()
with init_empty_weights():
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
_UpperCAmelCase : str = infer_auto_device_map(lowerCamelCase__ )
_UpperCAmelCase : Any = "cpu"
_UpperCAmelCase : Tuple = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , llm_inta_enable_fpaa_cpu_offload=lowerCamelCase__ )
# This should not work and get value error
with self.assertRaises(lowerCamelCase__ ):
_UpperCAmelCase : Dict = accelerator.prepare(lowerCamelCase__ )
@slow
@require_bnb
@require_multi_gpu
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
_UpperCAmelCase : Tuple = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
_UpperCAmelCase : int = infer_auto_device_map(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCamelCase__ , device_map=lowerCamelCase__ , )
_UpperCAmelCase : Union[str, Any] = Accelerator()
# This should not work and get value error
with self.assertRaises(lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = accelerator.prepare(lowerCamelCase__ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def lowerCAmelCase__ ( self : int ) ->Optional[int]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
_UpperCAmelCase : Tuple = infer_auto_device_map(lowerCamelCase__ )
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCamelCase__ , device_map=lowerCamelCase__ , )
_UpperCAmelCase : Optional[int] = Accelerator()
# This should work
_UpperCAmelCase : int = accelerator.prepare(lowerCamelCase__ )
@require_cuda
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = torch.nn.Linear(10 , 10 )
_UpperCAmelCase : List[str] = torch.optim.SGD(model.parameters() , lr=0.0_1 )
_UpperCAmelCase : int = Accelerator(cpu=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = accelerator.prepare(lowerCamelCase__ )
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def __lowerCAmelCase (__lowerCAmelCase ):
return int(x / 2**20 )
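# bamb converts a byte count to whole mebibytes, e.g. bamb(3 * 2**20) == 3.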
class lowerCAmelCase__ :
def __enter__( self : int ) ->Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_UpperCAmelCase : Tuple = torch.cuda.memory_allocated()
return self
def __exit__( self : Tuple , *lowerCamelCase__ : str ) ->int:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
_UpperCAmelCase : List[str] = torch.cuda.memory_allocated()
_UpperCAmelCase : Tuple = torch.cuda.max_memory_allocated()
_UpperCAmelCase : List[Any] = bamb(self.end - self.begin )
_UpperCAmelCase : int = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" , __lowerCAmelCase = 320 , __lowerCAmelCase = 160 , ):
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase )
# We also rename the 'label' column to 'labels', the column name that models in the transformers library expect
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[Any] = config["lr"]
_UpperCAmelCase : List[Any] = int(config["num_epochs"] )
_UpperCAmelCase : int = int(config["seed"] )
_UpperCAmelCase : Union[str, Any] = int(config["batch_size"] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
_UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , )
else:
_UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 )
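# Why the Dummy* branches above: when the DeepSpeed config file supplies its own
# optimizer/scheduler, DummyOptim and DummyScheduler act as placeholders that
# accelerator.prepare() swaps for the DeepSpeed-managed ones; otherwise the
# regular AdamW / linear-warmup pair is used directly.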
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
# We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase : str = 0
# Now we train the model
_UpperCAmelCase : Optional[Any] = {}
for epoch in range(__lowerCAmelCase , __lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.loss
_UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase ():
_UpperCAmelCase : Any = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , )
_UpperCAmelCase : Tuple = parser.parse_args()
_UpperCAmelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
import math
class lowerCAmelCase__ :
def __init__( self : Dict , lowerCamelCase__ : int ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = size
# approximate the overall size of segment tree with given value
_UpperCAmelCase : int = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
_UpperCAmelCase : Optional[Any] = [0 for i in range(0 , 4 * size )]
_UpperCAmelCase : Union[str, Any] = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
return idx * 2
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
return idx * 2 + 1
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : list[int] ) ->None:
'''simple docstring'''
if left_element == right_element:
_UpperCAmelCase : List[Any] = a[left_element - 1]
else:
_UpperCAmelCase : Optional[Any] = (left_element + right_element) // 2
self.build(self.left(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.build(self.right(lowerCamelCase__ ) , mid + 1 , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : int = max(
self.segment_tree[self.left(lowerCamelCase__ )] , self.segment_tree[self.right(lowerCamelCase__ )] )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->bool:
'''simple docstring'''
if self.flag[idx] is True:
_UpperCAmelCase : Tuple = self.lazy[idx]
_UpperCAmelCase : Any = False
if left_element != right_element:
_UpperCAmelCase : List[str] = self.lazy[idx]
_UpperCAmelCase : List[str] = self.lazy[idx]
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : Any = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
_UpperCAmelCase : List[str] = val
if left_element != right_element:
_UpperCAmelCase : Tuple = val
_UpperCAmelCase : Union[str, Any] = val
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : str = True
return True
_UpperCAmelCase : Union[str, Any] = (left_element + right_element) // 2
self.update(self.left(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.update(self.right(lowerCamelCase__ ) , mid + 1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = max(
self.segment_tree[self.left(lowerCamelCase__ )] , self.segment_tree[self.right(lowerCamelCase__ )] )
return True
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->int | float:
'''simple docstring'''
if self.flag[idx] is True:
_UpperCAmelCase : Any = self.lazy[idx]
_UpperCAmelCase : str = False
if left_element != right_element:
_UpperCAmelCase : Optional[int] = self.lazy[idx]
_UpperCAmelCase : Optional[Any] = self.lazy[idx]
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : Tuple = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
_UpperCAmelCase : Optional[int] = (left_element + right_element) // 2
_UpperCAmelCase : str = self.query(self.left(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[str] = self.query(self.right(lowerCamelCase__ ) , mid + 1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return max(lowerCamelCase__ , lowerCamelCase__ )
def __str__( self : Optional[Any] ) ->str:
'''simple docstring'''
return str([self.query(1 , 1 , self.size , lowerCamelCase__ , lowerCamelCase__ ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
lowerCamelCase__ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lowerCamelCase__ = 15
lowerCamelCase__ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
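# Expected output of the demo above, computed from A:
#   query(4, 6)  -> 7
#   query(7, 11) -> 14
#   query(7, 12) -> 15
#   after update(1..3 -> 111): query(1, 15) -> 111
#   after update(7..8 -> 235): printing segt gives
#   [111, 111, 111, 7, 3, -5, 235, 235, -20, 9, 14, 15, 5, 2, -8]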
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCamelCase__ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase : Tuple = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase : Optional[int] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase : Union[str, Any] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __lowerCAmelCase )
# ffn -> feed_forward
_UpperCAmelCase : Dict = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
_UpperCAmelCase : int = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
_UpperCAmelCase : Union[str, Any] = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
_UpperCAmelCase : int = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase : List[str] = "rwkv." + name
_UpperCAmelCase : Optional[Any] = weight
return state_dict
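# Illustrative renamings produced by convert_state_dict above (example keys, not
# read from a real checkpoint):
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "blocks.0.ln0.weight"     -> "rwkv.blocks.0.pre_ln.weight"
#   "blocks.2.att.time_mix_k" -> "rwkv.blocks.2.attention.time_mix_key"
#   "head.weight"             -> "head.weight"  (the head is left unprefixed)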
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
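# Usage sketch (not part of the original file; assumes the two class names
# above and the standard `OnnxConfig(config, task="default")` constructor).
# Guarded so it never runs on import:
if __name__ == "__main__":
    config = BertConfig(num_hidden_layers=6)  # override one hyperparameter
    print(config.hidden_size)  # 768, the default
    print(BertOnnxConfig(config).inputs)  # OrderedDict of dynamic ONNX axes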
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector):
    """Element-wise rectified linear unit: max(0, x)."""
    return np.maximum(0, vector)
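# Illustrative extension (not in the original file): ReLU's subgradient is the
# 0/1 indicator of positive inputs — all a manual backward pass needs.
def relu_derivative(vector):
    # 1.0 where the input is positive, 0.0 elsewhere (subgradient at 0 set to 0)
    return (np.asarray(vector) > 0).astype(float)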
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed all the RNGs used here so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """Exponential Moving Average of model weights."""
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
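    # Schedule intuition (illustrative numbers, not from the original file):
    # without warmup, (1 + step) / (10 + step) ramps fast — ~0.18 at step 1,
    # 0.55 at step 10, ~0.92 at step 100 — before being clamped into
    # [min_decay, decay]. With use_ema_warmup, 1 - (1 + step / inv_gamma) ** -power
    # ramps more slowly for larger inv_gamma or smaller power.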
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Any = LEDTokenizer
lowerCAmelCase : int = LEDTokenizerFast
lowerCAmelCase : Dict = True
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
super().setUp()
_UpperCAmelCase : Any = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_UpperCAmelCase : Tuple = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_UpperCAmelCase : Tuple = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCAmelCase : Optional[int] = {"unk_token": "<unk>"}
_UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Dict , **lowerCamelCase__ : Optional[Any] ) ->Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , **lowerCamelCase__ : int ) ->Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Dict ) ->List[Any]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->Any:
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def lowerCAmelCase__ ( self : Any ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase : Dict = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Any = tokenizer(lowerCamelCase__ , max_length=len(lowerCamelCase__ ) , padding=lowerCamelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_UpperCAmelCase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@require_torch
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Optional[Any] = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" )
self.assertIn("input_ids" , lowerCamelCase__ )
self.assertIn("attention_mask" , lowerCamelCase__ )
self.assertNotIn("labels" , lowerCamelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCamelCase__ )
@require_torch
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Dict = tokenizer(text_target=lowerCamelCase__ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Union[str, Any] = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = ["A long paragraph for summarization."]
_UpperCAmelCase : List[str] = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[str] = tokenizer(lowerCamelCase__ , return_tensors="pt" )
_UpperCAmelCase : Tuple = tokenizer(text_target=lowerCamelCase__ , return_tensors="pt" )
_UpperCAmelCase : str = inputs["input_ids"]
_UpperCAmelCase : Optional[int] = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[Any] = ["Summary of the text.", "Another summary."]
_UpperCAmelCase : Dict = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_UpperCAmelCase : List[str] = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = [[0] * len(lowerCamelCase__ ) for x in encoded_output["input_ids"]]
_UpperCAmelCase : Tuple = tokenizer.pad(lowerCamelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Dict = "A, <mask> AllenNLP sentence."
_UpperCAmelCase : List[str] = tokenizer_r.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
_UpperCAmelCase : int = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_UpperCAmelCase : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCamelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
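# Hedged restatement of the layer-selection scheme above on plain dicts:
# teacher layers [0, 2, 4, 7, 9, 11] land in student slots 0..5. Key names
# follow the BERT state-dict layout; the helper itself is illustrative.
def map_teacher_to_student(teacher_sd, prefix, teacher_layers):
    student_sd = {}
    for std_idx, teacher_idx in enumerate(teacher_layers):
        old = f"{prefix}.encoder.layer.{teacher_idx}."
        new = f"{prefix}.encoder.layer.{std_idx}."
        for key, tensor in teacher_sd.items():
            if key.startswith(old):
                student_sd[new + key[len(old):]] = tensor
    return student_sd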
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
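# Hedged sketch of a concrete reader on top of the abstract base above (the
# JSON-lines logic is illustrative, not the real datasets reader; assumes
# `path_or_paths` is a single file path and `Dataset.from_list` is available):
import json

class JsonLinesReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        records = []
        with open(self.path_or_paths, encoding="utf-8") as f:
            for line in f:
                records.append(json.loads(line))
        return Dataset.from_list(records)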
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth first search from the source vertex, filling the parent map."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to the target as an arrow-joined string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"""->{target_vertex}"""
if __name__ == "__main__":
g = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
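# Performance note (illustrative, not in the original file): list.pop(0) is
# O(n) per dequeue; collections.deque gives O(1) with identical FIFO order.
from collections import deque

def bfs_parents(graph: dict[str, list[str]], source: str) -> dict[str, str | None]:
    # standalone BFS producing the same parent map as Graph.breath_first_search
    parent: dict[str, str | None] = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for adjacent in graph[vertex]:
            if adjacent not in parent:
                parent[adjacent] = vertex
                queue.append(adjacent)
    return parent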
'''simple docstring'''
def find_minimum_change(denominations, value):
    """Greedy change-making: repeatedly take the largest denomination that fits."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take as many of this denomination as possible
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the coin used
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'
    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input('Enter the change you want to make: ').strip()
    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
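# Caveat (illustrative, not in the original file): greedy change-making is
# optimal for canonical coin systems like the Indian denominations above, but
# not in general — find_minimum_change([1, 3, 4], 6) returns [4, 1, 1] even
# though [3, 3] uses fewer coins.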
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = ["image_processor", "tokenizer"]
lowerCAmelCase : List[Any] = "BlipImageProcessor"
lowerCAmelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
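# Hedged usage sketch for the processor above (names illustrative; assumes an
# already-instantiated processor wrapping a Blip-style image processor and
# tokenizer):
def encode_example(processor, image, prompt: str):
    # images only -> pixel_values; text only -> input_ids/attention_mask;
    # both -> the image encoding updated in place with the text encoding
    return processor(images=image, text=prompt, return_tensors="pt")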
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : List[str] = KandinskyVaaPipeline
lowerCAmelCase : Optional[Any] = [
"image_embeds",
"negative_image_embeds",
]
lowerCAmelCase : List[str] = ["image_embeds", "negative_image_embeds"]
lowerCAmelCase : Dict = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCAmelCase : Any = False
@property
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self : List[Any] ) ->Tuple:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : int ) ->Union[str, Any]:
'''simple docstring'''
return 1_00
@property
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Any = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_UpperCAmelCase : int = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.dummy_unet
_UpperCAmelCase : List[Any] = self.dummy_movq
_UpperCAmelCase : int = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCamelCase__ , )
_UpperCAmelCase : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any]=0 ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith("mps" ):
_UpperCAmelCase : Tuple = torch.manual_seed(lowerCamelCase__ )
else:
_UpperCAmelCase : Any = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_UpperCAmelCase : Dict = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = "cpu"
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : List[Any] = self.pipeline_class(**lowerCamelCase__ )
_UpperCAmelCase : List[Any] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : List[Any] = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Tuple = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
_UpperCAmelCase : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = KandinskyVaaPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
_UpperCAmelCase : Optional[int] = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Tuple = "red cat, 4k photo"
_UpperCAmelCase : List[str] = torch.Generator(device="cuda" ).manual_seed(0 )
_UpperCAmelCase , _UpperCAmelCase : List[str] = pipe_prior(
lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
_UpperCAmelCase : str = torch.Generator(device="cuda" ).manual_seed(0 )
_UpperCAmelCase : str = pipeline(
image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=1_00 , output_type="np" , )
_UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowerCamelCase__ = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class WavaVecaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WavaVecaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def lowerCAmelCase__ ( self : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : Optional[Any] = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
_UpperCAmelCase : str = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
_UpperCAmelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
# Test batched
_UpperCAmelCase : Dict = feat_extract(lowerCamelCase__ , return_tensors="np" ).input_values
_UpperCAmelCase : Optional[Any] = feat_extract(lowerCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
_UpperCAmelCase : List[Any] = np.asarray(lowerCamelCase__ )
_UpperCAmelCase : List[str] = feat_extract(lowerCamelCase__ , return_tensors="np" ).input_values
_UpperCAmelCase : List[Any] = feat_extract(lowerCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : Optional[int] = ["longest", "max_length", "do_not_pad"]
_UpperCAmelCase : int = [None, 16_00, None]
for max_length, padding in zip(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Any = feat_extract(lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors="np" )
_UpperCAmelCase : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : int = range(8_00 , 14_00 , 2_00 )
_UpperCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in lengths]
_UpperCAmelCase : List[str] = ["longest", "max_length", "do_not_pad"]
_UpperCAmelCase : List[Any] = [None, 16_00, None]
for max_length, padding in zip(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[int] = feat_extract(lowerCamelCase__ , max_length=lowerCamelCase__ , padding=lowerCamelCase__ )
_UpperCAmelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : List[Any] = feat_extract(
lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=10_00 , padding="max_length" , return_tensors="np" )
_UpperCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : List[str] = feat_extract(
lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=10_00 , padding="longest" , return_tensors="np" )
_UpperCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
_UpperCAmelCase : int = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : Dict = feat_extract(
lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=20_00 , padding="longest" , return_tensors="np" )
_UpperCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
@require_torch
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
import torch
_UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Union[str, Any] = np.random.rand(1_00 ).astype(np.floataa )
_UpperCAmelCase : List[str] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase : int = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_UpperCAmelCase : Tuple = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_UpperCAmelCase : List[Any] = WavaVecaConfig.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == "layer" )
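# Hedged sketch of the zero-mean unit-variance normalization these tests
# verify (mirrors the per-sequence idea; the 1e-7 stabilizer is an assumption,
# not taken from this file):
def zero_mean_unit_var(x, eps=1e-7):
    x = np.asarray(x, dtype=np.float32)
    return (x - x.mean()) / np.sqrt(x.var() + eps)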
'''simple docstring'''
def solution():
    """Returns the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
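# Equivalent formulation (illustrative): only the last ten digits matter, so
# three-argument pow keeps every term bounded instead of growing big ints:
def solution_mod():
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1_001)) % mod).zfill(10)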
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowerCamelCase__ = True
from torch.cuda.amp import autocast
lowerCamelCase__ = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    processor: WavaVecaProcessor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths
        # and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
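# Note on the scaling above (illustrative arithmetic, not from the original
# file): dividing each micro-batch loss by gradient_accumulation_steps makes
# the accumulated gradient equal to the average over micro-batches — e.g. 4
# micro-batches with loss L each contribute L/4 before the single optimizer step.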
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
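# --- Added usage sketch (not part of the original script; the script filename and
# concrete argument values below are assumptions, while the flag names come from
# the dataclasses parsed above) ---
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-common-voice \
#       --do_train --do_eval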
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
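# --- Added usage sketch (not in the original module; the arrays below are made up) ---
def _demo_similarity():
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.0, 1.0]])
    print(euclidean(np.array([3.0, 4.0]), np.array([0.0, 0.0])))  # 5.0
    print(similarity_search(dataset, value_array))  # nearest vector and its distance
    print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 0.0])))  # 1.0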
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Add a new key (the current string extended by 0 and 1) to the LZW lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given string of bits using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the (zero-padded) binary length of the source file to the compressed data."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file, 8 bits per byte, with a stop marker."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
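# --- Added illustration (not in the original module; the byte value and file size
# below are made up) ---
# How a file's bytes become the "data bits" string, and how the length header is
# framed: for a 10-byte file, bin(10)[2:] == "1010", so the header is "000" + "1010".
def _demo_bits():
    data = b"A"
    bits = "".join(f"{byte:08b}" for byte in data)
    assert bits == "01000001"
    file_length_binary = bin(10)[2:]  # "1010"
    header = "0" * (len(file_length_binary) - 1) + file_length_binary  # "0001010"
    return bits, header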
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowerCamelCase__ = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
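# --- Added usage sketch (not in the original module; assumes numpy is installed
# in the environment) ---
def _demo_require_version():
    require_version("numpy")  # presence check only, no version constraint
    require_version("numpy>=1.17,<3")  # comma-separated range; every clause must hold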
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
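# --- Added worked example (not in the original module) ---
# differentiate() evaluates func at Dual(position, 1) and reads the k-th dual
# coefficient, scaled by k!. For f(x) = x**2 at x = 3:
#   Dual(3, 1)**2 == Dual(9, [6, 1, 0]), so the first derivative is 6 * 1! = 6.
def _demo_differentiate():
    assert differentiate(lambda x: x**2, 3, 1) == 6
    assert differentiate(lambda x: x**3, 2, 2) == 12  # d2/dx2 of x^3 is 6x, i.e. 12 at x=2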
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
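# --- Added note (not part of the original __init__) ---
# What the _LazyModule pattern buys: submodules listed in _import_structure are
# imported only on first attribute access, so, for example,
#   import transformers
#   transformers.ViTModel   # modeling_vit (and torch) are imported at this point
# keeps `import transformers` cheap when the heavy backends are never used.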
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
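# --- Added note (not in the original conftest) ---
# With the hooks above, report generation is opt-in; the flag name comes from the
# getoption call, and the report id is the flag's value, e.g.:
#   pytest --make-reports=tests_torch tests/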
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
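# --- Added usage sketch (not in the original file) ---
# The router_dtype validation above rejects anything outside the three allowed
# precisions; "float8" below is a deliberately invalid value for illustration.
def _demo_router_dtype_validation():
    try:
        NllbMoeConfig(router_dtype="float8")
    except ValueError as err:
        return str(err)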
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
'''simple docstring'''
import os
def solution():
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
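# --- Added illustration (not in the original solution; the numbers are made up) ---
# The same "sum, then truncate" idea on toy data: add the numbers up and keep only
# the first 10 digits of the total.
def _demo_truncated_sum():
    numbers = ["123", "456"]
    return str(sum(int(n) for n in numbers))[:10]  # "579"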
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
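# --- Added illustration (not in the original test file; the ids below are made up) ---
# The mask-building rule used above, on toy data: every non-pad position gets a 1.
def _demo_attention_mask():
    pad_token_id = 1
    input_ids = np.array([[5, 6, 2], [7, 2, 1]])
    return np.where(input_ids != pad_token_id, 1, 0)  # [[1, 1, 1], [1, 1, 0]]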
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase , UpperCAmelCase__ ):
lowerCAmelCase : int = True
lowerCAmelCase : Tuple = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase : List[str] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxBlenderbotModelTester(self )
def lowerCAmelCase__ ( self : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : List[str] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ )
@jax.jit
def encode_jitted(lowerCamelCase__ : Dict , lowerCamelCase__ : str=None , **lowerCamelCase__ : List[str] ):
return model.encode(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : Any = encode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : Dict = encode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : Tuple = model_class(lowerCamelCase__ )
_UpperCAmelCase : Dict = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
_UpperCAmelCase : str = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int ):
return model.decode(
decoder_input_ids=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , encoder_outputs=lowerCamelCase__ , )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : Any = decode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : int = decode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
            # the model expects an eos token in input_ids
_UpperCAmelCase : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
_UpperCAmelCase : int = model(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
@slow
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
_UpperCAmelCase : Any = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
_UpperCAmelCase : Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
_UpperCAmelCase : Tuple = ["Sam"]
_UpperCAmelCase : Dict = tokenizer(lowerCamelCase__ , return_tensors="jax" )
_UpperCAmelCase : Tuple = model.generate(**lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = "Sam is a great name. It means \"sun\" in Gaelic."
_UpperCAmelCase : str = tokenizer.batch_decode(lowerCamelCase__ , **lowerCamelCase__ )
assert generated_txt[0].strip() == tgt_text
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
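# Stop strings: a HumanEval completion is considered finished once the model
# emits any of these top-level constructs, since they signal that generation has
# moved past the function body being completed.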
lowerCamelCase__ = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=1 ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = tokenizer
_UpperCAmelCase : Tuple = dataset
_UpperCAmelCase : Union[str, Any] = len(lowerCamelCase__ ) if n_tasks is None else n_tasks
_UpperCAmelCase : Any = n_copies
def __iter__( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
_UpperCAmelCase : Optional[Any] = self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = start_length
_UpperCAmelCase : Union[str, Any] = eof_strings
_UpperCAmelCase : Union[str, Any] = tokenizer
def __call__( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_UpperCAmelCase : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase__ )
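# Helper that truncates a generation at the first stop string. `re.split` with a
# capturing group keeps the delimiters, so joining everything but the last two
# items drops the final stop string and whatever follows it. Illustrative
# example (hypothetical input): splitting "def f():\n    return 1\nclass Foo:"
# on the stop strings yields ["def f():\n    return 1", "\nclass", " Foo:"],
# and the join keeps only the function body.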
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = re.split("(%s)" % "|".join(__lowerCAmelCase ) , __lowerCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=20 , **__lowerCAmelCase ):
_UpperCAmelCase : Tuple = defaultdict(__lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowerCAmelCase ) ):
with torch.no_grad():
_UpperCAmelCase : Tuple = batch["ids"].shape[-1]
_UpperCAmelCase : Optional[int] = accelerator.unwrap_model(__lowerCAmelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__lowerCAmelCase , **__lowerCAmelCase )
# each task is generated batch_size times
_UpperCAmelCase : str = batch["task_id"].repeat(__lowerCAmelCase )
_UpperCAmelCase : str = accelerator.pad_across_processes(
__lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
_UpperCAmelCase , _UpperCAmelCase : int = accelerator.gather((generated_tokens, generated_tasks) )
_UpperCAmelCase : Dict = generated_tokens.cpu().numpy()
_UpperCAmelCase : Dict = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowerCAmelCase , __lowerCAmelCase ):
gen_token_dict[task].append(__lowerCAmelCase )
_UpperCAmelCase : int = [[] for _ in range(__lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_UpperCAmelCase : List[Any] = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
code_gens[task].append(remove_last_block(__lowerCAmelCase ) )
return code_gens
def __lowerCAmelCase ():
# Setup configuration
_UpperCAmelCase : List[str] = HfArgumentParser(__lowerCAmelCase )
_UpperCAmelCase : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_UpperCAmelCase : Any = args.HF_ALLOW_CODE_EVAL
    # make sure the tokenizer plays nicely with multiprocessing (disable tokenizers parallelism)
_UpperCAmelCase : List[str] = "false"
if args.num_workers is None:
_UpperCAmelCase : List[str] = multiprocessing.cpu_count()
    # Set up accelerate; the tokenized dataset below is fed through it
_UpperCAmelCase : List[Any] = Accelerator()
set_seed(args.seed , device_specific=__lowerCAmelCase )
# Load model and tokenizer
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase : List[str] = tokenizer.eos_token
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_UpperCAmelCase : Tuple = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowerCAmelCase , __lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
_UpperCAmelCase : Union[str, Any] = load_dataset("openai_humaneval" )
_UpperCAmelCase : List[Any] = load_metric("code_eval" )
_UpperCAmelCase : Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
_UpperCAmelCase : Any = args.n_samples // args.batch_size
_UpperCAmelCase : Tuple = TokenizedDataset(__lowerCAmelCase , human_eval["test"] , n_copies=__lowerCAmelCase , n_tasks=__lowerCAmelCase )
    # note: args.batch_size is actually num_return_sequences per prompt; the DataLoader batch size is 1
_UpperCAmelCase : List[str] = DataLoader(__lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_UpperCAmelCase : Optional[int] = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
_UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Dict = complete_code(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , n_tasks=__lowerCAmelCase , batch_size=args.batch_size , **__lowerCAmelCase , )
if accelerator.is_main_process:
_UpperCAmelCase : List[Any] = []
for task in tqdm(range(__lowerCAmelCase ) ):
_UpperCAmelCase : str = human_eval["test"][task]["test"]
_UpperCAmelCase : Union[str, Any] = F"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
_UpperCAmelCase , _UpperCAmelCase : str = code_eval_metric.compute(
references=__lowerCAmelCase , predictions=__lowerCAmelCase , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
    # For some reason the following seems to be necessary at times for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
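# CLIP-style image processor: optionally converts inputs to RGB, resizes to a
# fixed (height, width), rescales pixel values (by 1/255 by default), and
# normalizes with the OpenAI CLIP mean/std before packing everything into a
# BatchFeature.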
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[str] = ["pixel_values"]
def __init__( self : Optional[Any] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[int, float] = 1 / 2_55 , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : bool = True , **lowerCamelCase__ : Optional[int] , ) ->None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = size if size is not None else {"height": 3_84, "width": 3_84}
_UpperCAmelCase : str = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
_UpperCAmelCase : Dict = do_resize
_UpperCAmelCase : List[str] = size
_UpperCAmelCase : List[str] = resample
_UpperCAmelCase : Optional[Any] = do_rescale
_UpperCAmelCase : List[str] = rescale_factor
_UpperCAmelCase : Optional[int] = do_normalize
_UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCAmelCase : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCAmelCase : Tuple = do_convert_rgb
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : str , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
_UpperCAmelCase : Optional[Any] = (size["height"], size["width"])
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[int, float] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Tuple , ) ->str:
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : List[str] , ) ->np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : ImageInput , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[Dict[str, int]] = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[float] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase__ : Dict , ) ->PIL.Image.Image:
'''simple docstring'''
_UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Any = resample if resample is not None else self.resample
_UpperCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Dict = image_std if image_std is not None else self.image_std
_UpperCAmelCase : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCAmelCase : List[Any] = size if size is not None else self.size
_UpperCAmelCase : Dict = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCAmelCase : List[Any] = [convert_to_rgb(lowerCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
_UpperCAmelCase : Any = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
_UpperCAmelCase : Union[str, Any] = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_rescale:
_UpperCAmelCase : List[Any] = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
_UpperCAmelCase : Dict = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
_UpperCAmelCase : List[str] = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
_UpperCAmelCase : List[str] = BatchFeature(data={"pixel_values": images} , tensor_type=lowerCamelCase__ )
return encoded_outputs
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
lowerCamelCase__ = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
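# Fast (Rust-backed) RoBERTa tokenizer. The constructor below re-synchronizes
# the backend pre-tokenizer and post-processor with the `add_prefix_space` and
# `trim_offsets` arguments, since both components are serialized inside
# tokenizer.json and may disagree with the values passed in.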
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : int = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = ["input_ids", "attention_mask"]
lowerCAmelCase : List[Any] = RobertaTokenizer
def __init__( self : int , lowerCamelCase__ : int=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : Dict="replace" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Tuple="<s>" , lowerCamelCase__ : Tuple="<unk>" , lowerCamelCase__ : Dict="<pad>" , lowerCamelCase__ : Dict="<mask>" , lowerCamelCase__ : int=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Dict , ) ->Any:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
_UpperCAmelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) )
_UpperCAmelCase : Dict = add_prefix_space
_UpperCAmelCase : Union[str, Any] = pre_tok_class(**lowerCamelCase__ )
_UpperCAmelCase : List[Any] = add_prefix_space
_UpperCAmelCase : Optional[int] = "post_processor"
_UpperCAmelCase : Optional[Any] = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
_UpperCAmelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
_UpperCAmelCase : List[str] = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase : str = tuple(state["cls"] )
_UpperCAmelCase : List[str] = False
if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space:
_UpperCAmelCase : Any = add_prefix_space
_UpperCAmelCase : Any = True
if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets:
_UpperCAmelCase : int = trim_offsets
_UpperCAmelCase : Union[str, Any] = True
if changes_to_apply:
_UpperCAmelCase : Any = getattr(lowerCamelCase__ , state.pop("type" ) )
_UpperCAmelCase : Optional[int] = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
def lowerCAmelCase__ ( self : List[str] ) ->str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
_UpperCAmelCase : Any = value
def lowerCAmelCase__ ( self : Tuple , *lowerCamelCase__ : int , **lowerCamelCase__ : List[Any] ) ->BatchEncoding:
'''simple docstring'''
_UpperCAmelCase : int = kwargs.get("is_split_into_words" , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[Any] ) ->BatchEncoding:
'''simple docstring'''
_UpperCAmelCase : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Any=None ) ->int:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
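    # RoBERTa formats a single sequence as `<s> A </s>` and a pair as
    # `<s> A </s></s> B </s>`; the method below returns all-zero token type ids
    # for both cases, since RoBERTa does not use segment embeddings.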
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple = [self.sep_token_id]
_UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
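# Usage sketch (names hypothetical): calling the helper above as
#   url = hub_url("user/my-dataset", "data/train file.json", revision="main")
# yields a dataset-repo URL, percent-encoding the file path first on
# huggingface_hub versions older than 0.11.0, which did not encode it
# themselves.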
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase__ :
def __init__( self : Any , lowerCamelCase__ : List[Any] , ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = 13
_UpperCAmelCase : str = 7
_UpperCAmelCase : int = 30
_UpperCAmelCase : List[str] = self.seq_length + self.mem_len
_UpperCAmelCase : Union[str, Any] = 15
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : Union[str, Any] = 99
_UpperCAmelCase : int = [10, 50, 80]
_UpperCAmelCase : str = 32
_UpperCAmelCase : Union[str, Any] = 32
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : Optional[int] = 8
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : Optional[Any] = 2
_UpperCAmelCase : Optional[int] = 2
_UpperCAmelCase : str = None
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : Optional[Any] = 0
_UpperCAmelCase : List[Any] = 3
_UpperCAmelCase : str = self.vocab_size - 1
_UpperCAmelCase : List[str] = 0.0_1
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_labels:
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowerCAmelCase__ ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = TFTransfoXLModel(lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model(lowerCamelCase__ ).to_tuple()
_UpperCAmelCase : int = {"input_ids": input_ids_a, "mems": mems_a}
_UpperCAmelCase , _UpperCAmelCase : str = model(lowerCamelCase__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : int = TFTransfoXLLMHeadModel(lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model(lowerCamelCase__ ).to_tuple()
_UpperCAmelCase : List[str] = {"input_ids": input_ids_a, "labels": lm_labels}
_UpperCAmelCase , _UpperCAmelCase : List[Any] = model(lowerCamelCase__ ).to_tuple()
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model([input_ids_a, mems_a] ).to_tuple()
_UpperCAmelCase : Any = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
_UpperCAmelCase , _UpperCAmelCase : List[Any] = model(lowerCamelCase__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : int = TFTransfoXLForSequenceClassification(lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : Tuple = config_and_inputs
_UpperCAmelCase : str = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : List[Any] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase : int = () if is_tf_available() else ()
lowerCAmelCase : str = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCAmelCase : int = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Dict = False
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] ) ->Any:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # These raise `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or
            # `AssertionError`. `TransfoXLConfig` was never used in pipeline tests: a simple tokenizer
            # cannot be created.
return True
return False
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = TFTransfoXLModelTester(self )
_UpperCAmelCase : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , d_embed=37 )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
self.model_tester.set_seed()
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
self.model_tester.set_seed()
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_UpperCAmelCase : Dict = model_class(lowerCamelCase__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_UpperCAmelCase : Optional[int] = model.get_output_embeddings()
assert isinstance(lowerCamelCase__ , tf.keras.layers.Layer )
_UpperCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_UpperCAmelCase : Optional[int] = model.get_bias()
assert name is None
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Dict = TFTransfoXLModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def lowerCAmelCase__ ( self : str ) ->Any:
'''simple docstring'''
pass
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
_UpperCAmelCase : int = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_UpperCAmelCase : str = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_UpperCAmelCase : Union[str, Any] = model.generate(lowerCamelCase__ , max_length=2_00 , do_sample=lowerCamelCase__ )
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase__ )
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
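# Adapter that exposes a `timm` feature-extraction model through the
# transformers backbone interface: `timm.create_model(...,
# features_only=config.features_only)` produces the feature maps, and
# `BackboneMixin` surfaces them according to `out_indices`.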
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
_UpperCAmelCase : Optional[int] = {layer["module"]: str(lowerCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
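# Unit test for `send_file`: both the socket and `open` are patched, the mocked
# file read yields one chunk and then None to terminate the transfer loop, and
# the assertions verify the full bind/listen/accept/send/shutdown/close
# sequence.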
@patch("socket.socket" )
@patch("builtins.open" )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
# ===== initialization =====
_UpperCAmelCase : Union[str, Any] = Mock()
_UpperCAmelCase : Tuple = conn, Mock()
_UpperCAmelCase : Union[str, Any] = iter([1, None] )
_UpperCAmelCase : List[Any] = lambda __lowerCAmelCase : next(__lowerCAmelCase )
# ===== invoke =====
send_file(filename="mytext.txt" , testing=__lowerCAmelCase )
    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
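# Lazy import structure: submodules are only imported on first attribute access
# through `_LazyModule`, except under TYPE_CHECKING, where the real imports run
# so static type checkers can see the symbols.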
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''
from __future__ import annotations
import math
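# Minimax over an implicit complete binary game tree: `scores` holds the leaf
# values and `height` is log2(len(scores)). Worked trace under the standard
# alternating-player convention, using the example from main(),
# scores = [90, 23, 6, 33, 21, 65, 123, 34423], height = 3:
#   depth 2 (max): 90, 33, 65, 34423
#   depth 1 (min): 33, 65
#   depth 0 (max): 65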
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__lowerCAmelCase ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , )
def __lowerCAmelCase ():
_UpperCAmelCase : str = [90, 23, 6, 33, 21, 65, 123, 34_423]
_UpperCAmelCase : List[str] = math.log(len(__lowerCAmelCase ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
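        # retain_grad() below is required because these intermediate activations are
        # non-leaf tensors; without it their .grad would be None after backward().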
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 40
| 1
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
return "".join([hex(__lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(__lowerCAmelCase )] )
def __lowerCAmelCase (__lowerCAmelCase ):
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(__lowerCAmelCase ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(__lowerCAmelCase ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(__lowerCAmelCase ) , 2 ) )
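# Illustrative round trip (the encoder and decoder above have obfuscated names,
# so "encode"/"decode" here are stand-ins):
#   encode(b"Hello")     -> "48656C6C6F"
#   decode("48656C6C6F") -> b"Hello"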
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
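# The helper below converts a byte count into mebibytes (2**20 bytes); it is
# used as `bamb(...)` in the memory reports later in the script.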
def __lowerCAmelCase (__lowerCAmelCase ):
return int(x / 2**20 )
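# Context manager that snapshots CUDA memory on __enter__ and, on __exit__,
# records both the allocation delta and the peak allocation observed inside
# the block (used as `TorchTracemalloc` around each training epoch below).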
class lowerCAmelCase__ :
def __enter__( self : int ) ->Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_UpperCAmelCase : Tuple = torch.cuda.memory_allocated()
return self
def __exit__( self : Tuple , *lowerCamelCase__ : str ) ->int:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
_UpperCAmelCase : List[str] = torch.cuda.memory_allocated()
_UpperCAmelCase : Tuple = torch.cuda.max_memory_allocated()
_UpperCAmelCase : List[Any] = bamb(self.end - self.begin )
_UpperCAmelCase : int = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" , __lowerCAmelCase = 320 , __lowerCAmelCase = 160 , ):
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[Any] = config["lr"]
_UpperCAmelCase : List[Any] = int(config["num_epochs"] )
_UpperCAmelCase : int = int(config["seed"] )
_UpperCAmelCase : Union[str, Any] = int(config["batch_size"] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also controls the initialization of the new weights)
_UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase : Any = 1
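    # Total number of optimizer steps: batches per epoch times epochs, divided by
    # the accumulation factor, since the optimizer only steps every N batches.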
_UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , )
else:
_UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase : str = 0
# Now we train the model
_UpperCAmelCase : Optional[Any] = {}
for epoch in range(__lowerCAmelCase , __lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.loss
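                # Scale the loss so that gradients accumulated over several micro-batches
                # average out before the optimizer step below.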
_UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase ():
_UpperCAmelCase : Any = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , )
_UpperCAmelCase : Tuple = parser.parse_args()
_UpperCAmelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 40
| 1
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowerCAmelCase__ :
def __init__( self : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Any=False , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : int=19 , lowerCamelCase__ : str=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : List[str]=4 , lowerCamelCase__ : str=37 , lowerCamelCase__ : List[str]="gelu" , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : int=5_12 , lowerCamelCase__ : List[Any]=16 , lowerCamelCase__ : int=2 , lowerCamelCase__ : Any=0.0_2 , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : int=4 , lowerCamelCase__ : Dict=None , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : List[str] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Union[str, Any] = use_input_mask
_UpperCAmelCase : Any = use_token_type_ids
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : Optional[Any] = vocab_size
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : Tuple = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Tuple = hidden_act
_UpperCAmelCase : str = hidden_dropout_prob
_UpperCAmelCase : Dict = attention_probs_dropout_prob
_UpperCAmelCase : Dict = max_position_embeddings
_UpperCAmelCase : int = type_vocab_size
_UpperCAmelCase : List[Any] = type_sequence_label_size
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : Dict = num_labels
_UpperCAmelCase : Dict = num_choices
_UpperCAmelCase : List[Any] = scope
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : List[Any] = None
if self.use_input_mask:
_UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : str = None
if self.use_labels:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowerCamelCase__ , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
return config
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : str = EsmForProteinFolding(config=lowerCamelCase__ ).float()
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ )
_UpperCAmelCase : str = model(lowerCamelCase__ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = config_and_inputs
_UpperCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : str = False
lowerCAmelCase : Union[str, Any] = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCAmelCase : Optional[Any] = ()
lowerCAmelCase : str = {} if is_torch_available() else {}
lowerCAmelCase : str = False
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = EsmFoldModelTester(self )
_UpperCAmelCase : str = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@unittest.skip("Does not support attention outputs" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
pass
@unittest.skip
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
'''simple docstring'''
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCAmelCase__ ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip("ESMFold only has one output format." )
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support input chunking." )
def lowerCAmelCase__ ( self : int ) ->str:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def lowerCAmelCase__ ( self : int ) ->Tuple:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@slow
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float()
model.eval()
_UpperCAmelCase : str = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )["positions"]
_UpperCAmelCase : List[Any] = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowerCamelCase__ , atol=1E-4 ) )
| 40
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCamelCase__ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase : Tuple = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase : Optional[int] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase : Union[str, Any] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __lowerCAmelCase )
# ffn -> feed_forward
_UpperCAmelCase : Dict = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
_UpperCAmelCase : int = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
_UpperCAmelCase : Union[str, Any] = name.replace(".time_mix_v" , ".time_mix_value" )
    # time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
_UpperCAmelCase : int = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase : List[str] = "rwkv." + name
_UpperCAmelCase : Optional[Any] = weight
return state_dict
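# Illustrative renames produced by the function above:
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "blocks.0.att.time_mix_k" -> "rwkv.blocks.0.attention.time_mix_key"
#   "head.weight"             -> "head.weight" (the LM head keeps its name, no "rwkv." prefix)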
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
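    # `shard_checkpoint` returns the shard-file -> tensors mapping plus an index
    # dict mapping each weight to its shard; the index is None when everything
    # fits in a single shard, hence the `if index is not None` check below.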
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 40
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int]=7 , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : Optional[int]=18 , lowerCamelCase__ : int=30 , lowerCamelCase__ : Optional[int]=4_00 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : int=None , lowerCamelCase__ : Tuple=True , ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = size if size is not None else {"height": 18, "width": 18}
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : int = batch_size
_UpperCAmelCase : Optional[Any] = num_channels
_UpperCAmelCase : str = image_size
_UpperCAmelCase : Dict = min_resolution
_UpperCAmelCase : Optional[int] = max_resolution
_UpperCAmelCase : Optional[int] = do_resize
_UpperCAmelCase : Optional[int] = size
_UpperCAmelCase : Tuple = apply_ocr
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "apply_ocr" ) )
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
_UpperCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , lowerCamelCase__ )
self.assertIsInstance(encoding.boxes , lowerCamelCase__ )
# Test batched
_UpperCAmelCase : Tuple = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
_UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
_UpperCAmelCase : List[str] = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
_UpperCAmelCase : int = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = LayoutLMvaImageProcessor()
from datasets import load_dataset
_UpperCAmelCase : List[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
_UpperCAmelCase : Any = Image.open(ds[0]["file"] ).convert("RGB" )
_UpperCAmelCase : Optional[Any] = image_processing(lowerCamelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase : str = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
_UpperCAmelCase : Dict = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], 
[6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCamelCase__ )
self.assertListEqual(encoding.boxes , lowerCamelCase__ )
        # with apply_ocr = False
_UpperCAmelCase : str = LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ )
_UpperCAmelCase : int = image_processing(lowerCamelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 40
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __lowerCAmelCase (__lowerCAmelCase ):
return np.maximum(0 , __lowerCAmelCase )
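# np.maximum broadcasts the scalar 0 against its input, so the function above
# applies ReLU elementwise to arrays of any shape,
# e.g. [[-2., 3.], [0., -1.]] -> [[0., 3.], [0., 0.]].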
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 40
| 1
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
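        # Warmup schedule: decay = 1 - (1 + step / inv_gamma) ** -power, which ramps
        # the EMA decay from 0 toward 1 over training; without warmup the simpler
        # hyperbolic ramp (1 + step) / (10 + step) is used instead.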
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 40
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 40
| 1
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
_UpperCAmelCase : Optional[int] = {layer["module"]: str(lowerCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
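# Editor's note: a minimal sketch of the timm `features_only` API that the
# wrapper above delegates to; "resnet18" and out_indices=(1, 2, 3) are
# illustrative assumptions (any name from `timm.list_models()` works).
import timm
import torch

backbone = timm.create_model(
    "resnet18", pretrained=False, features_only=True, out_indices=(1, 2, 3)
)
print(backbone.feature_info.channels())  # channels of each selected stage
feature_maps = backbone(torch.randn(1, 3, 224, 224))
for fm in feature_maps:
    print(tuple(fm.shape))  # one (B, C, H, W) tensor per requested stage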
| 40
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
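# Editor's note: the script above initializes a 6-layer student from teacher
# layers [0, 2, 4, 7, 9, 11]; a toy, self-contained sketch of that renaming:
teacher_sd = {f"encoder.layer.{i}.weight": f"W{i}" for i in range(12)}
layer_map = [0, 2, 4, 7, 9, 11]  # teacher layer kept for each student slot

student_sd = {
    f"encoder.layer.{std_idx}.weight": teacher_sd[f"encoder.layer.{t_idx}.weight"]
    for std_idx, t_idx in enumerate(layer_map)
}
print(student_sd)  # student layer k holds teacher layer layer_map[k]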
| 40
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 40
|
'''simple docstring'''
from __future__ import annotations
lowerCamelCase__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : dict[str, list[str]] , lowerCamelCase__ : str ) ->None:
'''simple docstring'''
_UpperCAmelCase : Dict = graph
# mapping node to its parent in resulting breadth first tree
_UpperCAmelCase : dict[str, str | None] = {}
_UpperCAmelCase : List[Any] = source_vertex
def lowerCAmelCase__ ( self : Optional[int] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {self.source_vertex}
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[str] = [self.source_vertex] # first in first out queue
while queue:
_UpperCAmelCase : int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = vertex
queue.append(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
_UpperCAmelCase : int = self.parent.get(lowerCamelCase__ )
if target_vertex_parent is None:
_UpperCAmelCase : Tuple = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(lowerCamelCase__ )
return self.shortest_path(lowerCamelCase__ ) + F"""->{target_vertex}"""
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
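# Editor's note: a compact, de-obfuscated sketch of the same parent-tree BFS;
# collections.deque replaces list.pop(0), which is O(n) per dequeue.
from collections import deque

def bfs_path(graph, source, target):
    parent = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for adjacent in graph[vertex]:
            if adjacent not in parent:
                parent[adjacent] = vertex
                queue.append(adjacent)
    if target not in parent:
        raise ValueError(f"No path from vertex: {source} to vertex: {target}")
    path = []
    while target is not None:
        path.append(target)
        target = parent[target]
    return "->".join(reversed(path))

demo = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
print(bfs_path(demo, "A", "D"))  # A->B->D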
| 40
| 1
|
'''simple docstring'''
class lowerCAmelCase__ :
def __init__( self : Dict , lowerCamelCase__ : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = n
_UpperCAmelCase : List[str] = [None] * self.n
_UpperCAmelCase : List[str] = 0 # index of the first element
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : List[Any] = 0
def __len__( self : Optional[int] ) ->int:
'''simple docstring'''
return self.size
def lowerCAmelCase__ ( self : Any ) ->bool:
'''simple docstring'''
return self.size == 0
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
return False if self.is_empty() else self.array[self.front]
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
_UpperCAmelCase : List[str] = data
_UpperCAmelCase : Dict = (self.rear + 1) % self.n
self.size += 1
return self
def lowerCAmelCase__ ( self : Any ) ->int:
'''simple docstring'''
if self.size == 0:
raise Exception("UNDERFLOW" )
_UpperCAmelCase : Dict = self.array[self.front]
_UpperCAmelCase : str = None
_UpperCAmelCase : List[Any] = (self.front + 1) % self.n
self.size -= 1
return temp
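# Editor's note: a short usage sketch of the ring buffer above, showing the
# (index + 1) % n wraparound; `CircularQueue` is the de-obfuscated name
# assumed for the class.
class CircularQueue:
    def __init__(self, n):
        self.n, self.array = n, [None] * n
        self.front = self.rear = self.size = 0

    def enqueue(self, data):
        if self.size >= self.n:
            raise OverflowError("queue is full")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n  # wrap around the fixed buffer
        self.size += 1

    def dequeue(self):
        if self.size == 0:
            raise IndexError("queue is empty")
        temp, self.array[self.front] = self.array[self.front], None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp

q = CircularQueue(3)
for item in "abc":
    q.enqueue(item)
print(q.dequeue(), q.dequeue())  # a b
q.enqueue("d")  # rear has wrapped past the end of the buffer
print(q.dequeue(), q.dequeue())  # c d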
| 40
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = ["image_processor", "tokenizer"]
lowerCAmelCase : List[Any] = "BlipImageProcessor"
lowerCAmelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 40
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : int = IFPipeline
lowerCAmelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
lowerCAmelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase : str = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
return self._get_dummy_components()
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=0 ) ->Any:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("mps" ):
_UpperCAmelCase : Tuple = torch.manual_seed(lowerCamelCase__ )
else:
_UpperCAmelCase : int = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self : Any ) ->Dict:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self._test_save_load_local()
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
_UpperCAmelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
_UpperCAmelCase , _UpperCAmelCase : Tuple = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_UpperCAmelCase : Any = None
_UpperCAmelCase : Optional[int] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_UpperCAmelCase : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
_UpperCAmelCase : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_UpperCAmelCase : Any = IFInpaintingPipeline(**pipe_a.components )
_UpperCAmelCase : List[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str ) ->List[str]:
'''simple docstring'''
_start_torch_memory_measurement()
_UpperCAmelCase : str = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , num_inference_steps=2 , generator=lowerCamelCase__ , output_type="np" , )
_UpperCAmelCase : Dict = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_UpperCAmelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase : str = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Any = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , image=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
_start_torch_memory_measurement()
_UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , image=lowerCamelCase__ , num_inference_steps=2 , generator=lowerCamelCase__ , output_type="np" , )
_UpperCAmelCase : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_UpperCAmelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , image=lowerCamelCase__ , original_image=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_UpperCAmelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple ) ->Optional[int]:
'''simple docstring'''
_start_torch_memory_measurement()
_UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : int = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Any = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , num_inference_steps=2 , generator=lowerCamelCase__ , output_type="np" , )
_UpperCAmelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Tuple = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , original_image=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_UpperCAmelCase : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
def __lowerCAmelCase ():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
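# Editor's note: a hedged sketch of the peak-memory measurement pattern behind
# `_start_torch_memory_measurement()` and the `max_memory_allocated()` budget
# asserts above; guarded so it only runs where CUDA exists.
import torch

if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    x = torch.randn(1024, 1024, device="cuda")  # the allocation under test
    y = x @ x
    mem_bytes = torch.cuda.max_memory_allocated()
    print(f"peak allocated: {mem_bytes / 2**20:.1f} MiB")
    assert mem_bytes < 1 * 10**9  # same budget-assert style as the tests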
| 40
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ): # noqa: E741
_UpperCAmelCase : List[str] = len(__lowerCAmelCase )
_UpperCAmelCase : str = 0
_UpperCAmelCase : List[str] = [0] * n
_UpperCAmelCase : int = [False] * n
_UpperCAmelCase : Dict = [False] * n
def dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if parent == root:
out_edge_count += 1
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : str = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_UpperCAmelCase : List[str] = dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Tuple = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_UpperCAmelCase : Dict = True
# AP found via cycle
if at == low[to]:
_UpperCAmelCase : Dict = True
else:
_UpperCAmelCase : Optional[int] = min(low[at] , __lowerCAmelCase )
return out_edge_count
for i in range(__lowerCAmelCase ):
if not visited[i]:
_UpperCAmelCase : str = 0
_UpperCAmelCase : Tuple = dfs(__lowerCAmelCase , __lowerCAmelCase , -1 , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = out_edge_count > 1
for x in range(len(__lowerCAmelCase ) ):
if is_art[x] is True:
print(__lowerCAmelCase )
# Adjacency list of graph
lowerCamelCase__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
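# Editor's note: the renamer above turned the `visited`/`low` updates into
# throwaway assignments, so here is a de-obfuscated, runnable sketch of the
# standard Tarjan articulation-point search on the same adjacency list.
def articulation_points(graph):
    n = len(graph)
    visited, ids, low = [False] * n, [0] * n, [0] * n
    is_art, timer = [False] * n, [0]

    def dfs(root, at, parent):
        visited[at] = True
        ids[at] = low[at] = timer[0]
        timer[0] += 1
        children = 0
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                children += 1
                dfs(root, to, at)
                low[at] = min(low[at], low[to])
                if at != root and ids[at] <= low[to]:
                    is_art[at] = True  # no back edge climbs above `at`
            else:
                low[at] = min(low[at], ids[to])
        return children

    for i in range(n):
        if not visited[i]:
            is_art[i] = dfs(i, i, -1) > 1  # root is an AP iff >1 DFS child
    return [v for v, flag in enumerate(is_art) if flag]

demo = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3],
        5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
print(articulation_points(demo))  # [2, 3, 5]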
| 40
| 1
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __lowerCAmelCase (__lowerCAmelCase ):
if is_torch_version("<" , "2.0.0" ) or not hasattr(__lowerCAmelCase , "_dynamo" ):
return False
return isinstance(__lowerCAmelCase , torch._dynamo.eval_frame.OptimizedModule )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = True ):
_UpperCAmelCase : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
_UpperCAmelCase : Optional[Any] = is_compiled_module(__lowerCAmelCase )
if is_compiled:
_UpperCAmelCase : Optional[Any] = model
_UpperCAmelCase : Tuple = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = model.module
if not keep_fpaa_wrapper:
_UpperCAmelCase : Tuple = getattr(__lowerCAmelCase , "forward" )
_UpperCAmelCase : Tuple = model.__dict__.pop("_original_forward" , __lowerCAmelCase )
if original_forward is not None:
while hasattr(__lowerCAmelCase , "__wrapped__" ):
_UpperCAmelCase : List[Any] = forward.__wrapped__
if forward == original_forward:
break
_UpperCAmelCase : Optional[Any] = forward
if getattr(__lowerCAmelCase , "_converted_to_transformer_engine" , __lowerCAmelCase ):
convert_model(__lowerCAmelCase , to_transformer_engine=__lowerCAmelCase )
if is_compiled:
_UpperCAmelCase : str = model
_UpperCAmelCase : Optional[Any] = compiled_model
return model
def __lowerCAmelCase ():
PartialState().wait_for_everyone()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__lowerCAmelCase , __lowerCAmelCase )
elif PartialState().local_process_index == 0:
torch.save(__lowerCAmelCase , __lowerCAmelCase )
@contextmanager
def __lowerCAmelCase (**__lowerCAmelCase ):
for key, value in kwargs.items():
_UpperCAmelCase : int = str(__lowerCAmelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __lowerCAmelCase (__lowerCAmelCase ):
if not hasattr(__lowerCAmelCase , "__qualname__" ) and not hasattr(__lowerCAmelCase , "__name__" ):
_UpperCAmelCase : Dict = getattr(__lowerCAmelCase , "__class__" , __lowerCAmelCase )
if hasattr(__lowerCAmelCase , "__qualname__" ):
return obj.__qualname__
if hasattr(__lowerCAmelCase , "__name__" ):
return obj.__name__
return str(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
for key, value in source.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = destination.setdefault(__lowerCAmelCase , {} )
merge_dicts(__lowerCAmelCase , __lowerCAmelCase )
else:
_UpperCAmelCase : str = value
return destination
def __lowerCAmelCase (__lowerCAmelCase = None ):
if port is None:
_UpperCAmelCase : Union[str, Any] = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
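# Editor's note: a small runnable demo of the recursive `merge_dicts` defined
# above: nested dicts merge key-by-key, scalar values are overwritten.
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination

base = {"optimizer": {"lr": 1e-3, "betas": (0.9, 0.999)}, "seed": 0}
override = {"optimizer": {"lr": 5e-4}, "seed": 42}
print(merge_dicts(override, base))
# -> {'optimizer': {'lr': 0.0005, 'betas': (0.9, 0.999)}, 'seed': 42}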
| 40
|
'''simple docstring'''
def __lowerCAmelCase ():
_UpperCAmelCase : str = 0
for i in range(1 , 1_001 ):
total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
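# Editor's note: a self-contained, runnable version of the Project Euler 48
# computation above, using three-argument pow() so only the last ten digits
# are ever carried through the sum:
def self_powers_last_ten(limit=1_000):
    mod = 10**10
    total = sum(pow(i, i, mod) for i in range(1, limit + 1)) % mod
    return str(total).zfill(10)

print(self_powers_last_ten())  # 9110846700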
| 40
| 1
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __lowerCAmelCase (__lowerCAmelCase ):
return np.maximum(0 , __lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 40
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ) ) )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if dataset.ndim != value_array.ndim:
_UpperCAmelCase : Optional[Any] = (
"Wrong input data's dimensions... "
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__lowerCAmelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCAmelCase : Optional[int] = (
"Wrong input data's shape... "
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__lowerCAmelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
_UpperCAmelCase : Union[str, Any] = (
"Input data have different datatype... "
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = []
for value in value_array:
_UpperCAmelCase : List[str] = euclidean(__lowerCAmelCase , dataset[0] )
_UpperCAmelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCAmelCase : int = euclidean(__lowerCAmelCase , __lowerCAmelCase )
if dist > temp_dist:
_UpperCAmelCase : Tuple = temp_dist
_UpperCAmelCase : Dict = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return np.dot(__lowerCAmelCase , __lowerCAmelCase ) / (norm(__lowerCAmelCase ) * norm(__lowerCAmelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
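# Editor's note: the nearest-neighbour loop above costs O(len(value_array) *
# len(dataset)) Python iterations; a hedged numpy-broadcasting sketch of the
# same search:
import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
queries = np.array([[0.1, 0.1], [1.9, 2.1]])

# pairwise distance matrix of shape (num_queries, num_dataset_points)
dists = np.linalg.norm(queries[:, None, :] - dataset[None, :, :], axis=-1)
nearest = dists.argmin(axis=1)
for q, idx, d in zip(queries, nearest, dists.min(axis=1)):
    print(q.tolist(), "->", dataset[idx].tolist(), f"dist={d:.3f}")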
| 40
| 1
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
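# Editor's note: a hedged usage sketch of the helper above. The repo id and
# file name are illustrative assumptions; building the URL needs no network
# access.
import huggingface_hub as hfh

url = hfh.hf_hub_url(
    "squad", "plain_text/train-00000-of-00001.parquet",
    repo_type="dataset", revision="main",
)
print(url)  # https://huggingface.co/datasets/squad/resolve/main/...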
| 40
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
_UpperCAmelCase : Optional[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowerCamelCase__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any=None ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_UpperCAmelCase : Tuple = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowerCamelCase__ , "w" , newline="\n" ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
# Copy consistency with a really long name
_UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowerCamelCase__ , overwrite_result=re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
| 40
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Tuple = "decision_transformer"
lowerCAmelCase : Dict = ["past_key_values"]
lowerCAmelCase : int = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[Any] , lowerCamelCase__ : List[Any]=17 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Tuple=1_28 , lowerCamelCase__ : str=40_96 , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[str]=1 , lowerCamelCase__ : List[str]=10_24 , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : Optional[Any]=1 , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Optional[int]="relu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : List[str]=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=1E-5 , lowerCamelCase__ : List[str]=0.0_2 , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : str=True , lowerCamelCase__ : Union[str, Any]=5_02_56 , lowerCamelCase__ : Any=5_02_56 , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Tuple=False , **lowerCamelCase__ : Dict , ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = state_dim
_UpperCAmelCase : List[Any] = act_dim
_UpperCAmelCase : Dict = hidden_size
_UpperCAmelCase : Union[str, Any] = max_ep_len
_UpperCAmelCase : Dict = action_tanh
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : Any = n_positions
_UpperCAmelCase : Tuple = n_layer
_UpperCAmelCase : List[str] = n_head
_UpperCAmelCase : str = n_inner
_UpperCAmelCase : int = activation_function
_UpperCAmelCase : Optional[int] = resid_pdrop
_UpperCAmelCase : Any = embd_pdrop
_UpperCAmelCase : Union[str, Any] = attn_pdrop
_UpperCAmelCase : Optional[Any] = layer_norm_epsilon
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : str = scale_attn_weights
_UpperCAmelCase : Tuple = use_cache
_UpperCAmelCase : str = scale_attn_by_inverse_layer_idx
_UpperCAmelCase : Union[str, Any] = reorder_and_upcast_attn
_UpperCAmelCase : List[Any] = bos_token_id
_UpperCAmelCase : int = eos_token_id
super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
| 40
|
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
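# Worked check of the example above (added): f(y) = y**2 * y**4 = y**6, so the
# second derivative is f''(y) = 30 * y**4, and at y = 9 the script prints
# 30 * 9**4 = 196830. Dual(position, 1) seeds the first-order dual part with 1,
# and differentiate() recovers the k-th derivative as duals[k - 1] * k!.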
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
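# Minimal sketch of the scoring rule above (added; values are illustrative only):
#
#   import torch
#   cos = torch.tensor([[0.31, 0.05]])      # cosine similarity per concept
#   thresh = torch.tensor([0.28, 0.20])     # learned per-concept thresholds
#   scores = cos - thresh + 0.0             # adjustment starts at 0.0
#   torch.any(scores > 0, dim=1)            # tensor([True]) -> image gets flagged
#
# Once a "special care" concept fires, the adjustment rises to 0.01, making every
# subsequent concept check slightly stricter -- the stronger-filter trade-off the
# comments describe.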
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
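# Usage note (added): with these hooks in place, report generation is opt-in, e.g.
#
#   python -m pytest tests/ --make-reports=my_run
#
# which makes `pytest_terminal_summary` route the summary through
# `pytest_terminal_summary_main` with `id="my_run"`.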
'''simple docstring'''
import math
import random
def sigmoid_function(value, deriv=False):
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected, number_propagations):
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = int(input('Expected value: '))
lowerCamelCase__ = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
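# Quick numeric check (added): sigmoid(0) = 0.5, and the derivative expressed in
# terms of the output is out * (1 - out) = 0.25 at out = 0.5, which is exactly
# what sigmoid_function(value, deriv=True) computes above. The loop nudges a
# single weight toward expected / 100 with a delta-rule update.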
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[int]=4 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_attention_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_choices
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_attention_mask:
_UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_token_type_ids:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
            _UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
_UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
        _UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
        _UpperCAmelCase : str = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32 )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0]
        _UpperCAmelCase : int = [1, 11, 50265]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
        _UpperCAmelCase : int = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
        _UpperCAmelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
        _UpperCAmelCase : List[Any] = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32 )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
        _UpperCAmelCase : str = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
'''simple docstring'''
def triangle_number_generator():
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
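# Sanity check (added): 28 = 2**2 * 7, so count_divisors(28) = (2 + 1) * (1 + 1) = 6,
# matching its divisors 1, 2, 4, 7, 14, 28; 28 is also the first triangle number
# with more than five divisors (1, 3, 6, 10, 15, 21, 28), the pattern this
# Project Euler problem scales up to "more than 500".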
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
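# Behaviour note (added): with the _LazyModule pattern above, the torch-backed
# submodules listed in _import_structure are only imported on first attribute
# access, e.g. (assuming the usual package location)
#
#   from transformers.models.instructblip import InstructBlipProcessor
#
# while the `if TYPE_CHECKING:` branch keeps the eager imports visible to static
# type checkers without paying the runtime cost.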
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s):
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s):
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s):
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s):
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'''{key:21} {value}''')
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
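# Added note: all four checks are linear scans at heart; the timing gaps above come
# from constant factors. Slicing (`s == s[::-1]`) runs at C speed, the explicit
# loops pay Python-level overhead per character, and the recursive version also
# copies `s[1:-1]` on every call, which is why it trails the rest.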
'''simple docstring'''
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z)
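# Added note: for randomized quicksort the expected comparison count is
# O(n log n) -- roughly 2 * n * ln(n), i.e. about 920 for the n = 100 normal
# samples sorted here -- and that is precisely the quantity the returned `count`
# (printed as z above) measures.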
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each task prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks whether all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
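# Usage sketch (added; assumes a tokenizer is available locally):
#
#   config = GPTJConfig()
#   onnx_config = GPTJOnnxConfig(config, use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
#
# dummy["past_key_values"] then holds n_layer (key, value) zero tensors of shape
# (2, n_head, 8 + 2, n_embd // n_head), and the attention mask is widened by the
# same past length so it covers past + current tokens.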
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
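# Usage sketch (added; the repo and file name are illustrative):
#
#   hf_hub_url("squad", "plain_text/train.parquet", revision="main")
#
# returns the dataset-repo resolve URL via huggingface_hub; on hub versions older
# than 0.11.0 the path is percent-encoded first, which is the only job of the
# compatibility branch above.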
'''simple docstring'''
import os
import pytest
from attr import dataclass
lowerCamelCase__ = 'us-east-1' # defaults region
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str
lowerCAmelCase : Union[str, Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
lowerCAmelCase : Any = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5_500,
}
lowerCAmelCase : str = {**hyperparameters, "max_steps": 1_000}
@property
def lowerCAmelCase__ ( self : List[str] ) ->str:
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = SageMakerTestEnvironment(framework=request.cls.framework )
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
        _UpperCAmelCase : Optional[int] = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
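# Usage sketch (added; `TimmBackbone` is the assumed public name of the class
# defined above, and "resnet50" must be a model name timm actually ships):
#
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # one tensor per out_index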
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase__ , "width_multiplier" ) )
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Any=13 , lowerCamelCase__ : int=64 , lowerCamelCase__ : Any=2 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Optional[Any]="swish" , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : Any=32 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Dict=0.2_5 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : int=0.0 , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : List[str] = batch_size
_UpperCAmelCase : Union[str, Any] = image_size
_UpperCAmelCase : Optional[int] = patch_size
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : List[str] = make_divisible(5_12 * width_multiplier , divisor=8 )
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Optional[int] = conv_kernel_size
_UpperCAmelCase : List[Any] = output_stride
_UpperCAmelCase : List[Any] = classifier_dropout_prob
_UpperCAmelCase : Optional[Any] = use_labels
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : Optional[Any] = num_labels
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : Optional[int] = scope
_UpperCAmelCase : Dict = width_multiplier
_UpperCAmelCase : str = ffn_dropout
_UpperCAmelCase : Tuple = attn_dropout
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : int = None
_UpperCAmelCase : Any = None
if self.use_labels:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = MobileViTVaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Optional[int] = MobileViTVaForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.num_labels
_UpperCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Dict = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = config_and_inputs
_UpperCAmelCase : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Dict = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase : Optional[Any] = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : str = False
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MobileViTVaModelTester(self )
_UpperCAmelCase : List[Any] = MobileViTVaConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = model_class(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : List[str] = [*signature.parameters.keys()]
_UpperCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int ):
_UpperCAmelCase : str = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = outputs.hidden_states
_UpperCAmelCase : Tuple = 5
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_UpperCAmelCase : Optional[Any] = 2
for i in range(len(lowerCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : int = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : int = MobileViTVaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->Any:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        # with target sizes, the segmentation maps are resized to the requested resolution
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        # without target sizes, the maps keep the model's output resolution
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
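# A minimal end-to-end inference sketch mirroring the integration tests above (not a test
# itself). It assumes the imports at the top of this file, network access to the
# `apple/mobilevitv2-1.0-imagenet1k-256` checkpoint, and falls back to CPU when CUDA is absent.
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
    classifier = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(device)
    example_inputs = processor(images=prepare_img(), return_tensors="pt").to(device)
    with torch.no_grad():
        class_logits = classifier(**example_inputs).logits
    # print the top ImageNet-1k class id for the COCO cats fixture image
    print("predicted class id:", class_logits.argmax(-1).item())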
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
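# Usage sketch, kept as comments since an __init__.py should not run example code at import
# time. With the lazy module installed in sys.modules, attribute access triggers the import:
#
#     from transformers import MraConfig, MraForSequenceClassification
#     config = MraConfig()                          # imports configuration_mra on demand
#     model = MraForSequenceClassification(config)  # imports modeling_mra on demand
#
# Only the submodules actually used are imported, keeping `import transformers` lightweight.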