code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('fixtures/test_sentencepiece.model')
_a = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
_a = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = CamembertTokenizer
SCREAMING_SNAKE_CASE__ : List[Any] = CamembertTokenizerFast
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : str = True
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : str = CamembertTokenizer(lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = "<pad>"
UpperCAmelCase_ : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowercase_ ) , 1004 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = CamembertTokenizer(lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : List[str] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[str] = tokenizer.encode(lowercase_ )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
UpperCAmelCase_ : Any = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
UpperCAmelCase_ : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
UpperCAmelCase_ : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_rust_tokenizer()
UpperCAmelCase_ : str = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[str] = tokenizer.tokenize(lowercase_ )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
UpperCAmelCase_ : Any = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(lowercase_ )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
# fmt: off
UpperCAmelCase_ : int = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
UpperCAmelCase_ : Tuple = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=lowercase_ , )
| 61
|
"""simple docstring"""
def lowercase ( __snake_case : Optional[int] ):
lowercase_ : int = 0
lowercase_ : Optional[Any] = len(__snake_case )
for i in range(n - 1 ):
for j in range(i + 1 , __snake_case ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def lowercase ( __snake_case : str ):
if len(__snake_case ) <= 1:
return arr, 0
lowercase_ : Optional[Any] = len(__snake_case ) // 2
lowercase_ : List[Any] = arr[0:mid]
lowercase_ : Union[str, Any] = arr[mid:]
lowercase_ , lowercase_ : Tuple = count_inversions_recursive(__snake_case )
lowercase_ , lowercase_ : List[Any] = count_inversions_recursive(__snake_case )
lowercase_ , lowercase_ : List[Any] = _count_cross_inversions(__snake_case , __snake_case )
lowercase_ : List[Any] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def lowercase ( __snake_case : str , __snake_case : Optional[int] ):
lowercase_ : Optional[Any] = []
lowercase_ : Any = 0
while i < len(__snake_case ) and j < len(__snake_case ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__snake_case ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__snake_case ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def lowercase ( ):
lowercase_ : Union[str, Any] = [1_0, 2, 1, 5, 5, 2, 1_1]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
lowercase_ : int = count_inversions_bf(__snake_case )
lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __snake_case )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
lowercase_ : Dict = count_inversions_bf(__snake_case )
lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __snake_case )
# an empty list should also have zero inversions
lowercase_ : List[Any] = []
lowercase_ : Any = count_inversions_bf(__snake_case )
lowercase_ , lowercase_ : List[str] = count_inversions_recursive(__snake_case )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __snake_case )
if __name__ == "__main__":
main()
| 33
| 0
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

# Silence TensorFlow's C++ logging before TF is (possibly) imported below.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
| 62
|
"""simple docstring"""
__A : Any = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 33
| 0
|
"""Lazy import structure for the EfficientNet model package."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Map submodule name -> public names, filled in below as optional deps are detected.
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A : List[Any] = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__A : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "tokenizer"]
lowercase__ = "ViTImageProcessor"
lowercase__ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self: str, a_: Optional[int]=None, a_: Dict=None, **a_: Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""", a_, )
_snake_case : Tuple = kwargs.pop("""feature_extractor""" )
_snake_case : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a_, a_ )
def __call__( self: List[str], a_: Union[str, Any]=None, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, **a_: Dict ):
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError("""You have to specify either text, visual prompt or images.""" )
if text is not None and visual_prompt is not None:
raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" )
if text is not None:
_snake_case : Dict = self.tokenizer(a_, return_tensors=a_, **a_ )
if visual_prompt is not None:
_snake_case : List[Any] = self.image_processor(a_, return_tensors=a_, **a_ )
if images is not None:
_snake_case : Tuple = self.image_processor(a_, return_tensors=a_, **a_ )
if visual_prompt is not None and images is not None:
_snake_case : Any = {
"""pixel_values""": image_features.pixel_values,
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_snake_case : Optional[Any] = {
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**a_ ), tensor_type=a_ )
def UpperCamelCase_ ( self: List[Any], *a_: Tuple, **a_: Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_, **a_ )
def UpperCamelCase_ ( self: Any, *a_: Union[str, Any], **a_: str ):
'''simple docstring'''
return self.tokenizer.decode(*a_, **a_ )
@property
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, )
return self.image_processor_class
@property
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, )
return self.image_processor
| 64
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__A : List[str] = '''examples/'''
__A : int = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__A : Dict = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__A : Optional[int] = '''README.md'''
def lowercase ( __snake_case : int , __snake_case : Any , __snake_case : int ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase_ : int = f.read()
lowercase_ , lowercase_ : List[str] = REPLACE_PATTERNS[pattern]
lowercase_ : Union[str, Any] = replace.replace('''VERSION''' , __snake_case )
lowercase_ : Optional[Any] = re_pattern.sub(__snake_case , __snake_case )
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__snake_case )
def lowercase ( __snake_case : int ):
for folder, directories, fnames in os.walk(__snake_case ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__snake_case , __snake_case ) , __snake_case , pattern='''examples''' )
def lowercase ( __snake_case : Optional[Any] , __snake_case : Optional[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__snake_case , __snake_case , __snake_case )
if not patch:
update_version_in_examples(__snake_case )
def lowercase ( ):
lowercase_ : Union[str, Any] = '''🤗 Transformers currently provides the following architectures'''
lowercase_ : Union[str, Any] = '''1. Want to contribute a new model?'''
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase_ : List[str] = f.readlines()
# Find the start of the list.
lowercase_ : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowercase_ : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
lowercase_ : str = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__snake_case )
def lowercase ( ):
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
lowercase_ : List[Any] = f.read()
lowercase_ : List[str] = REPLACE_PATTERNS['''init'''][0].search(__snake_case ).groups()[0]
return packaging.version.parse(__snake_case )
def lowercase ( __snake_case : Optional[Any]=False ):
lowercase_ : str = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
lowercase_ : Optional[Any] = default_version.base_version
elif patch:
lowercase_ : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowercase_ : Optional[int] = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowercase_ : int = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__snake_case ) == 0:
lowercase_ : Dict = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case , patch=__snake_case )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowercase ( ):
lowercase_ : List[Any] = get_version()
lowercase_ : List[str] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowercase_ : Any = current_version.base_version
# Check with the user we got that right.
lowercase_ : Tuple = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__snake_case ) == 0:
lowercase_ : str = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__A : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 33
| 0
|
from packaging import version

from .import_utils import is_accelerate_available

if is_accelerate_available():
    import accelerate


def lowerCAmelCase_(method):
    """Decorator: run accelerate's `_hf_hook.pre_forward` before calling `method`.

    No-op (returns `method` unchanged) when accelerate is unavailable or older
    than 0.17.0, where the hook API does not apply.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        # Only fire the hook when a pre_forward-capable hook is attached to self.
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
| 65
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowercase ( __snake_case : str , __snake_case : str , __snake_case : Optional[str] = None ):
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
lowercase_ : Union[str, Any] = quote(__snake_case )
return hfh.hf_hub_url(__snake_case , __snake_case , repo_type='''dataset''' , revision=__snake_case )
| 33
| 0
|
"""simple docstring"""
from math import factorial
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""" )
return factorial(_lowercase ) // (factorial(_lowercase ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
F"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
| 66
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny ResNet configs and inputs for the TF model tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in hidden_sizes (checked by the hidden-states test).
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/config tests for the TF ResNet classes."""

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    # ResNet has no attention, no embeddings to resize/prune, and no ONNX test here.
    # NOTE(review): flag names follow the common TF tester mixin — confirm against it.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ResNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ResNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the vision integration tests."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """End-to-end inference check against the first pretrained TF ResNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 33
| 0
|
'''simple docstring'''
def solution(UpperCamelCase__: int = 1_00_00_00) -> int:
    """Return the sum of Euler's totient ``phi(n)`` for ``2 <= n <= UpperCamelCase__``.

    This equals the number of reduced proper fractions with denominator up to
    the limit (Project Euler problem 72).

    Fix: the transformed body referenced names (``primes``, ``phi``) that no
    longer existed and the sieve strides were mangled (``range(p*p, limit,
    limit)``), so it raised ``NameError``.  This version uses an exact integer
    totient sieve: start with phi[n] = n and, for every prime p, apply
    phi[n] -= phi[n] // p to all multiples n of p (equivalent to multiplying
    by 1 - 1/p, but without float rounding).

    Args:
        UpperCamelCase__: inclusive upper bound for the denominators.

    Returns:
        sum(phi(n) for n in range(2, limit + 1)) as an exact int.
    """
    limit = UpperCamelCase__
    if limit < 2:
        return 0
    # phi[n] starts at n; after the sieve it holds the exact totient of n.
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # still untouched => p is prime
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p
    return sum(phi[2:])


# Backward-compatible alias for the transformed name (note: the leading
# underscores keep it out of ``import *``).
__lowerCAmelCase = solution


if __name__ == "__main__":
    print(f'{solution() = }')
| 67
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__A : Dict = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class _UpperCAmelCase ( unittest.TestCase , _A ):
    """Tests for the ``text-question-answering`` tool, local and remote variants.

    NOTE(review): each call below originally passed the module-level TEXT
    fixture (and ``remote=True`` in the setup); the renaming pass replaced
    those names with an unbound ``A``, so the calls no longer resolve as written.
    """

    def A ( self : List[Any] ) -> Dict:
        # setUp-equivalent: instantiate local and remote variants of the tool.
        lowercase_ : Optional[int] = load_tool('''text-question-answering''' )
        self.tool.setup()
        lowercase_ : Union[str, Any] = load_tool('''text-question-answering''' , remote=A )

    def A ( self : Any ) -> List[str]:
        # Positional call on the local tool.
        lowercase_ : Union[str, Any] = self.tool(A , '''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )

    def A ( self : str ) -> List[str]:
        # Positional call on the remote tool.
        lowercase_ : int = self.remote_tool(A , '''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )

    def A ( self : List[Any] ) -> int:
        # Keyword-argument call on the local tool.
        lowercase_ : Optional[Any] = self.tool(text=A , question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )

    def A ( self : List[str] ) -> Optional[int]:
        # Keyword-argument call on the remote tool.
        lowercase_ : int = self.remote_tool(text=A , question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )
| 33
| 0
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class a__ ( snake_case ):
    """Tests for ``RagTokenizer``, which bundles a DPR question-encoder
    tokenizer with a BART generator tokenizer.

    NOTE(review): a renaming pass collapsed distinct locals into ``A__`` and
    distinct arguments into ``lowercase``; comments record the intended names.
    """

    def UpperCamelCase ( self ) -> int:
        '''setUp-equivalent: write throwaway DPR (WordPiece) and BART (byte-level BPE) tokenizer files.'''
        A__ = tempfile.mkdtemp()
        A__ = 8
        # DPR tok: minimal WordPiece vocabulary, one token per line.
        A__ = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        A__ = os.path.join(self.tmpdirname , "dpr_tokenizer" )
        os.makedirs(lowercase , exist_ok=lowercase )
        A__ = os.path.join(lowercase , DPR_VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

        # BART tok: minimal byte-level BPE vocab and merge rules
        # (\u0120 is the byte-level marker for a leading space).
        A__ = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        A__ = dict(zip(lowercase , range(len(lowercase ) ) ) )
        A__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        A__ = {"unk_token": "<unk>"}
        A__ = os.path.join(self.tmpdirname , "bart_tokenizer" )
        os.makedirs(lowercase , exist_ok=lowercase )
        A__ = os.path.join(lowercase , BART_VOCAB_FILES_NAMES["vocab_file"] )
        A__ = os.path.join(lowercase , BART_VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowercase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowercase ) )

    def UpperCamelCase ( self ) -> DPRQuestionEncoderTokenizer:
        '''Load the DPR question-encoder tokenizer written during setUp.'''
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )

    def UpperCamelCase ( self ) -> BartTokenizer:
        '''Load the BART tokenizer written during setUp.'''
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        '''tearDown-equivalent: remove the temporary tokenizer directory.'''
        shutil.rmtree(self.tmpdirname )

    @require_tokenizers
    def UpperCamelCase ( self ) -> Optional[int]:
        '''Round-trip save_pretrained/from_pretrained and compare the sub-tokenizers.'''
        A__ = os.path.join(self.tmpdirname , "rag_tokenizer" )
        A__ = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        A__ = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(lowercase )
        rag_tokenizer.save_pretrained(lowercase )
        A__ = RagTokenizer.from_pretrained(lowercase , config=lowercase )
        # The reloaded sub-tokenizers must match the originals class- and vocab-wise.
        self.assertIsInstance(new_rag_tokenizer.question_encoder , lowercase )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , lowercase )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )

    @slow
    def UpperCamelCase ( self ) -> str:
        '''Smoke-test tokenizing a question batch with the pretrained rag-token checkpoint.'''
        A__ = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
        A__ = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        A__ = tokenizer(lowercase )
        self.assertIsNotNone(lowercase )

    @slow
    def UpperCamelCase ( self ) -> Optional[int]:
        '''Smoke-test tokenizing a question batch with the pretrained rag-sequence checkpoint.'''
        A__ = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
        A__ = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        A__ = tokenizer(lowercase )
        self.assertIsNotNone(lowercase )
| 68
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _UpperCAmelCase ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    """Format Arrow rows/columns/batches as PyTorch tensors.

    Fixes relative to the transformed source (restored per the upstream
    ``datasets`` TorchFormatter):
    - ``__init__`` declared ``A`` twice (a ``SyntaxError``); the parameters
      are ``features`` and ``**torch_tensor_kwargs``.
    - All methods were renamed to ``A`` and so shadowed each other, while the
      bodies still called ``_consolidate``/``_tensorize``/``format_row`` etc.;
      the real method names are restored.
    - ``torch.intaa``/``torch.floataa`` are not valid torch attributes; the
      intended defaults are ``torch.int64`` and ``torch.float32``.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # Extra kwargs (e.g. device, dtype overrides) forwarded to torch.tensor.
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a homogeneous list of tensors into one tensor; otherwise return as-is."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert a scalar/ndarray to a torch.Tensor, leaving strings/bytes/None alone."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            # Character arrays cannot become tensors; fall back to Python lists.
            return value.tolist()

        default_dtype = {}
        # Default dtypes: int64 for integers, float32 for floats (torch conventions).
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # User-supplied kwargs take precedence over the inferred default dtype.
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        """Tensorize arbitrarily nested arrays/lists/tuples."""
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Apply _recursive_tensorize over a (possibly nested) mapping."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Extract + decode one row and tensorize its values."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        """Extract + decode one column, tensorize and stack it."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Extract + decode a batch dict and tensorize each column."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 33
| 0
|
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class UpperCamelCase :
    """Builds tiny Transfo-XL configs/inputs and runs shape checks for the TF
    model classes.

    NOTE(review): a renaming pass collapsed distinct attribute assignments
    into the same ``snake_case_`` target; trailing comments give the original
    field names read back via ``self.<name>`` below.
    """

    def __init__( self, lowerCAmelCase__, ) -> List[str]:
        snake_case_ = parent
        snake_case_ = 13  # batch_size
        snake_case_ = 7  # seq_length
        snake_case_ = 30  # mem_len
        snake_case_ = self.seq_length + self.mem_len  # key_length
        snake_case_ = 15  # clamp_len
        snake_case_ = True  # is_training
        snake_case_ = True  # use_labels
        snake_case_ = 99  # vocab_size
        snake_case_ = [10, 50, 80]  # adaptive-softmax cutoffs
        snake_case_ = 32  # hidden_size
        snake_case_ = 32  # d_embed
        snake_case_ = 4  # num_attention_heads
        snake_case_ = 8  # d_head
        snake_case_ = 128  # d_inner
        snake_case_ = 2  # div_val
        snake_case_ = 2  # num_hidden_layers
        snake_case_ = None  # scope
        snake_case_ = 1  # seed
        snake_case_ = 0  # eos_token_id
        snake_case_ = 3  # num_labels
        snake_case_ = self.vocab_size - 1  # pad_token_id
        snake_case_ = 0.01  # init_range

    def a_ ( self) -> int:
        """Create a config plus two random input id tensors and optional labels."""
        snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        snake_case_ = None
        if self.use_labels:
            snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        snake_case_ = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )

        return (config, input_ids_a, input_ids_a, lm_labels)

    def a_ ( self) -> int:
        """Seed Python and TF RNGs so runs are reproducible."""
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
        """Check base-model outputs and the shape of the returned memories."""
        snake_case_ = TFTransfoXLModel(lowerCAmelCase__)

        snake_case_ , snake_case_ = model(lowerCAmelCase__).to_tuple()

        # Second pass feeding the previous memories back in.
        snake_case_ = {'input_ids': input_ids_a, 'mems': mems_a}

        snake_case_ , snake_case_ = model(lowerCAmelCase__).to_tuple()

        self.parent.assertEqual(hidden_states_a.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_a.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # One memory tensor per layer, of shape (mem_len, batch, hidden).
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
        """Check LM-head logits and memories across dict, list and labelled inputs."""
        snake_case_ = TFTransfoXLLMHeadModel(lowerCAmelCase__)

        snake_case_ , snake_case_ = model(lowerCAmelCase__).to_tuple()

        snake_case_ = {'input_ids': input_ids_a, 'labels': lm_labels}
        snake_case_ , snake_case_ = model(lowerCAmelCase__).to_tuple()

        # Positional-list calling convention.
        snake_case_ , snake_case_ = model([input_ids_a, mems_a]).to_tuple()

        snake_case_ = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
        snake_case_ , snake_case_ = model(lowerCAmelCase__).to_tuple()

        self.parent.assertEqual(lm_logits_a.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

        self.parent.assertEqual(lm_logits_a.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str:
        """Check the sequence-classification head output shape."""
        snake_case_ = TFTransfoXLForSequenceClassification(lowerCAmelCase__)
        snake_case_ = model(lowerCAmelCase__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def a_ ( self) -> Optional[int]:
        """Adapt prepare_config_and_inputs output to the common-test dict format."""
        snake_case_ = self.prepare_config_and_inputs()
        ((snake_case_) , (snake_case_) , (snake_case_) , (snake_case_)) = config_and_inputs
        snake_case_ = {'input_ids': input_ids_a}
        return config, inputs_dict
@require_tf
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
    """Common-test harness for the TF Transfo-XL model family."""

    # Model classes exercised by the shared ModelTesterMixin checks.
    SCREAMING_SNAKE_CASE_ = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    # No generative classes registered yet (see TODO below).
    SCREAMING_SNAKE_CASE_ = () if is_tf_available() else ()
    SCREAMING_SNAKE_CASE_ = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False

    def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Dict:
        """Return True for pipeline test cases that cannot run with TransfoXL."""
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def a_ ( self) -> List[Any]:
        """Set up the model tester and the config tester."""
        snake_case_ = TFTransfoXLModelTester(self)
        snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, d_embed=37)

    def a_ ( self) -> Dict:
        self.config_tester.run_common_tests()

    def a_ ( self) -> Dict:
        """Base model shape checks (seeded for reproducibility)."""
        self.model_tester.set_seed()
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*lowerCAmelCase__)

    def a_ ( self) -> Tuple:
        """LM-head shape checks (seeded for reproducibility)."""
        self.model_tester.set_seed()
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCAmelCase__)

    def a_ ( self) -> Union[str, Any]:
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCAmelCase__)

    def a_ ( self) -> Any:
        """Check input/output embedding accessors for every model class."""
        snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        # Only the sequence-classification head exposes output embeddings.
        snake_case_ = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            snake_case_ = model_class(lowerCAmelCase__)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                snake_case_ = model.get_output_embeddings()
                assert isinstance(lowerCAmelCase__, tf.keras.layers.Layer)
                snake_case_ = model.get_bias()
                assert name is None
            else:
                snake_case_ = model.get_output_embeddings()
                assert x is None
                snake_case_ = model.get_bias()
                assert name is None

    def a_ ( self) -> Optional[int]:
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def a_ ( self) -> str:
        """Smoke-test loading the first public checkpoint from the Hub."""
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ = TFTransfoXLModel.from_pretrained(lowerCAmelCase__)
            self.assertIsNotNone(lowerCAmelCase__)

    @unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.')
    def a_ ( self) -> Union[str, Any]:
        pass
@require_tf
class UpperCamelCase ( unittest.TestCase ):
    """Integration test: greedy generation from the pretrained transfo-xl-wt103
    checkpoint must reproduce a stored token sequence."""

    @unittest.skip('Skip test until #12651 is resolved.')
    @slow
    def a_ ( self) -> List[Any]:
        snake_case_ = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')

        # Prompt token ids for the text paraphrased in the comment below.
        # NOTE(review): `tf.intaa` is a digit-stripped rename (presumably
        # tf.int32 or tf.int64 — confirm against the upstream test).
        # fmt: off
        snake_case_ = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.intaa) # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>

        # Expected prompt + continuation token ids.
        # fmt: off
        snake_case_ = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>

        # NOTE(review): `do_sample` should receive False here; the original
        # keyword value was lost by the renaming pass.
        snake_case_ = model.generate(lowerCAmelCase__, max_length=200, do_sample=lowerCAmelCase__)
        self.assertListEqual(output_ids[0].numpy().tolist(), lowerCAmelCase__)
| 69
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available

# The VQ-Diffusion pipeline needs both PyTorch and transformers; expose its
# symbols only when both optional dependencies are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33
| 0
|
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """Return True if ``input_string`` fully matches ``pattern``.

    The pattern supports ``.`` (any single character) and ``*`` (zero or more
    of the preceding element); the whole string must match.

    Fix: the transformed definition declared both parameters under the same
    name (a ``SyntaxError``) and lost the ``dp`` index assignments; this
    restores the standard bottom-up dynamic programme.

    >>> match_pattern("aab", "c*a*b")
    True
    >>> match_pattern("aaa", "aa")
    False
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


# Backward-compatible alias for the transformed name.
UpperCamelCase__ = match_pattern


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"""{input_string} matches the given pattern {pattern}""")
    else:
        print(f"""{input_string} does not match with the given pattern {pattern}""")
| 70
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__A : Union[str, Any] = logging.get_logger(__name__)
# General docstring
__A : Tuple = '''MobileNetV1Config'''
# Base docstring
__A : Union[str, Any] = '''google/mobilenet_v1_1.0_224'''
__A : Union[str, Any] = [1, 1_024, 7, 7]
# Image classification docstring
__A : Optional[Any] = '''google/mobilenet_v1_1.0_224'''
__A : List[Any] = '''tabby, tabby cat'''
__A : Union[str, Any] = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowercase ( __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Dict=None ):
    """Build the TF-checkpoint-name -> PyTorch-parameter map for MobileNetV1.

    NOTE(review): the renaming pass destroyed this function's behaviour — the
    dict-key assignments (``tf_to_pt_map[prefix + ...] = param``) were
    flattened into rebindings of a single local, and the final
    ``return tf_to_pt_map`` references a name that is never defined.  The
    original TF key layout must be recovered from the upstream implementation
    before this can run; the comments record the structural intent.
    """
    lowercase_ : str = {}

    if isinstance(__snake_case , __snake_case ):
        # Classification model: map weights against its backbone sub-module.
        lowercase_ : Union[str, Any] = model.mobilenet_va
    else:
        lowercase_ : Optional[Any] = model

    # Stem convolution + batch-norm parameters.
    lowercase_ : Union[str, Any] = '''MobilenetV1/Conv2d_0/'''
    lowercase_ : Union[str, Any] = backbone.conv_stem.convolution.weight
    lowercase_ : Optional[Any] = backbone.conv_stem.normalization.bias
    lowercase_ : Union[str, Any] = backbone.conv_stem.normalization.weight
    lowercase_ : Any = backbone.conv_stem.normalization.running_mean
    lowercase_ : int = backbone.conv_stem.normalization.running_var

    # 13 depthwise-separable blocks: each TF block index maps to a pair of
    # PyTorch layers (depthwise conv at 2i, pointwise conv at 2i+1).
    for i in range(1_3 ):
        lowercase_ : Optional[int] = i + 1
        lowercase_ : Union[str, Any] = i * 2

        lowercase_ : Optional[Any] = backbone.layer[pt_index]
        lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        lowercase_ : str = pointer.convolution.weight
        lowercase_ : int = pointer.normalization.bias
        lowercase_ : Any = pointer.normalization.weight
        lowercase_ : Dict = pointer.normalization.running_mean
        lowercase_ : Union[str, Any] = pointer.normalization.running_var

        lowercase_ : Any = backbone.layer[pt_index + 1]
        lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        lowercase_ : int = pointer.convolution.weight
        lowercase_ : str = pointer.normalization.bias
        lowercase_ : Tuple = pointer.normalization.weight
        lowercase_ : Dict = pointer.normalization.running_mean
        lowercase_ : Any = pointer.normalization.running_var

    if isinstance(__snake_case , __snake_case ):
        # Classification head weights.
        lowercase_ : Optional[Any] = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
        lowercase_ : Any = model.classifier.weight
        lowercase_ : Optional[int] = model.classifier.bias

    return tf_to_pt_map
def lowercase ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Dict ):
    """Load MobileNetV1 weights from a TF checkpoint into a PyTorch model.

    NOTE(review): the renaming pass collapsed the original parameters
    (model, config, tf_checkpoint_path) into ``__snake_case`` and rebinds a
    single ``lowercase_`` local, so several names below (``init_vars``,
    ``array``, ``tf_weights`` ...) no longer resolve as written; the comments
    record the intent.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.''' )
        raise

    # Load weights from TF model
    lowercase_ : Tuple = tf.train.list_variables(__snake_case )
    lowercase_ : int = {}
    for name, shape in init_vars:
        logger.info(F'''Loading TF weight {name} with shape {shape}''' )
        lowercase_ : Optional[Any] = tf.train.load_variable(__snake_case , __snake_case )
        lowercase_ : Optional[int] = array

    # Build TF to PyTorch weights loading map
    lowercase_ : Any = _build_tf_to_pytorch_map(__snake_case , __snake_case , __snake_case )

    for name, pointer in tf_to_pt_map.items():
        logger.info(F'''Importing {name}''' )
        if name not in tf_weights:
            logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
            continue

        lowercase_ : Union[str, Any] = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''' )
            # Depthwise TF kernels need a different axis order than dense ones.
            lowercase_ : Any = np.transpose(__snake_case , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('''Transposing''' )
            if len(pointer.shape ) == 2: # copying into linear layer
                lowercase_ : Optional[int] = array.squeeze().transpose()
            else:
                # TF conv kernels are HWIO; PyTorch expects OIHW.
                lowercase_ : Optional[int] = np.transpose(__snake_case , (3, 2, 0, 1) )

        if pointer.shape != array.shape:
            raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )

        logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
        lowercase_ : str = torch.from_numpy(__snake_case )

        # Drop the consumed variable and its optimizer/EMA slots so the final
        # report only lists genuinely unused checkpoint variables.
        tf_weights.pop(__snake_case , __snake_case )
        tf_weights.pop(name + '''/RMSProp''' , __snake_case )
        tf_weights.pop(name + '''/RMSProp_1''' , __snake_case )
        tf_weights.pop(name + '''/ExponentialMovingAverage''' , __snake_case )

    logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
    return model
def apply_tf_padding(features: "torch.Tensor", conv_layer: "nn.Conv2d") -> "torch.Tensor":
    """Zero-pad ``features`` so a strided convolution reproduces TensorFlow
    "SAME" padding.

    TF pads asymmetrically (the extra pixel goes to the bottom/right when the
    total padding is odd), whereas PyTorch's Conv2d only supports symmetric
    padding, so the padding is applied explicitly before the convolution.

    Fix: the transformed source collapsed every local (``pad_along_height``
    etc.) into ``lowercase_``, leaving all later references unbound; this
    restores the intended computation.

    Args:
        features: activation tensor of shape (..., height, width).
        conv_layer: the convolution whose stride/kernel determine the padding.

    Returns:
        The padded tensor.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    # Total padding per axis, per the TF "SAME" formula.
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    # Split each total asymmetrically: the extra pixel goes right/bottom.
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)


# Alias preserving the transformed module-level name.
lowercase = apply_tf_padding
class _UpperCAmelCase ( nn.Module ):
    """Convolution + optional batch-norm + optional activation building block
    for MobileNetV1.

    NOTE(review): ``nn.Convad`` / ``nn.BatchNormad`` are artefacts of a
    digit-stripping rename (originally ``nn.Conv2d`` / ``nn.BatchNorm2d``)
    and will not resolve as written; likewise the constructor parameters
    (config, in_channels, out_channels, kernel_size, stride, groups, bias,
    use_normalization, use_activation) were all collapsed into ``A``.
    """

    def __init__( self : List[Any] , A : MobileNetVaConfig , A : int , A : int , A : int , A : Optional[int] = 1 , A : Optional[int] = 1 , A : bool = False , A : Optional[bool] = True , A : Optional[bool or str] = True , ) -> None:
        super().__init__()
        lowercase_ : int = config

        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )

        # With TF-style padding the pad is applied dynamically in forward();
        # otherwise use the usual symmetric "same" padding here.
        lowercase_ : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 )

        lowercase_ : int = nn.Convad(
            in_channels=A , out_channels=A , kernel_size=A , stride=A , padding=A , groups=A , bias=A , padding_mode='''zeros''' , )

        if use_normalization:
            lowercase_ : Optional[Any] = nn.BatchNormad(
                num_features=A , eps=config.layer_norm_eps , momentum=0.9997 , affine=A , track_running_stats=A , )
        else:
            lowercase_ : Union[str, Any] = None

        # Resolve the activation: explicit name, config-specified name, or a
        # callable already stored on the config.
        if use_activation:
            if isinstance(A , A ):
                lowercase_ : str = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , A ):
                lowercase_ : Any = ACTaFN[config.hidden_act]
            else:
                lowercase_ : Tuple = config.hidden_act
        else:
            lowercase_ : Tuple = None

    def A ( self : str , A : torch.Tensor ) -> torch.Tensor:
        """conv -> batch-norm -> activation, with optional TF 'SAME' pre-padding."""
        if self.config.tf_padding:
            lowercase_ : List[Any] = apply_tf_padding(A , self.convolution )
        lowercase_ : Optional[int] = self.convolution(A )
        if self.normalization is not None:
            lowercase_ : Union[str, Any] = self.normalization(A )
        if self.activation is not None:
            lowercase_ : Optional[int] = self.activation(A )
        return features
class MobileNetVaPreTrainedModel(_A):
    """
    An abstract class handling weights initialization and a simple interface for
    downloading/loading pretrained MobileNetV1 models.
    """

    # NOTE(review): the obfuscated source gave all five hooks the same attribute name, so
    # only the last assignment survived. Restored to the conventional PreTrainedModel hooks,
    # inferred from the assigned values — confirm against the upstream transformers file.
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights of a single module (called by `post_init`)."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            # Batch norm starts as identity: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Both constants were named `__A` in the obfuscated source, so the inputs docstring clobbered
# the start docstring. Restored to the names the `add_start_docstrings*` decorators expect.
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(_A):
    """MobileNetV1 backbone: a strided conv stem followed by 13 depthwise-separable blocks."""

    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True) -> None:
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        # Stride pattern of the 13 depthwise layers from the MobileNetV1 architecture.
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            # Channel count doubles at every downsampling layer (and after the stem).
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution (groups == channels) ...
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # ... followed by a pointwise 1x1 convolution expanding the channels.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        # MobileNetV1 has no attention heads to prune.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        """Run the backbone; returns last hidden state, pooled output and (optionally) all hidden states."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(_A):
    """MobileNetV1 backbone plus a linear classification head over the pooled features."""

    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        """Classify `pixel_values`; when `labels` is given also compute the appropriate loss."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / num_labels (standard HF head logic).
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| 33
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that the gradients of two models are (not) in sync.

    When `did_step` is False gradients were accumulated locally and must differ;
    when True a synchronization step happened and they must match.
    (Was defined as `A` while call sites use `check_model_parameters` — restored.)
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model(accelerator, model, input, target, do_backward=True):
    """Run one forward/backward training step on `model`.

    With `do_backward` left True the backward pass goes through `accelerator.backward`;
    with False the loss is scaled by the accumulation steps and `loss.backward()` is
    called directly (the manual reference path).
    NOTE(review): the flag name reads inverted relative to its branches; behavior kept as-is.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Build a seeded regression model/dataloader and a DDP copy prepared by `accelerator`.

    Returns `(model, ddp_model, dataloader)`, or with `sched=True` the 7-tuple
    `(model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)`.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        # The lambda previously referenced an undefined `epoch`; bind it as the parameter.
        scheduler = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        model, opt, scheduler, dataloader = accelerator.prepare(model, opt, scheduler, dataloader)
    else:
        model, dataloader = accelerator.prepare(model, dataloader)
    if sched:
        return (model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """On a single CPU/GPU, `no_sync` must be a no-op: grads always stay in sync."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(accelerator, model, input, target)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(accelerator, ddp_model, ddp_input, ddp_target)
        else:
            # Sync grads
            step_model(accelerator, ddp_model, ddp_input, ddp_target)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """On a distributed setup, `no_sync` must keep grads local until a sync step happens."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(accelerator, model, input, target)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(accelerator, ddp_model, ddp_input, ddp_target)
        else:
            # Sync grads
            step_model(accelerator, ddp_model, ddp_input, ddp_target)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """Check `accelerator.accumulate` only syncs grads every `gradient_accumulation_steps`."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP" (manual backward path)
        step_model(accelerator, model, input, target, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(accelerator, ddp_model, ddp_input, ddp_target)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Check `accelerator.accumulate` keeps optimizer/scheduler state aligned with the manual path."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(accelerator, model, input, target, False)
        opt.step()
        # Manual path only steps the scheduler on accumulation boundaries (or the last batch).
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(accelerator, ddp_model, ddp_input, ddp_target)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """Check `GradientState.active_dataloader` tracking across nested/broken dataloader loops."""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nested iteration must switch the active dataloader and restore it afterwards.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Dispatch the gradient-accumulation tests appropriate for the current distributed setup."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('**Test `accumulate` gradient accumulation with dataloader break**')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('**Test NOOP `no_sync` context manager**')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('**Test Distributed `no_sync` context manager**')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('<', '2.0') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                '`split_batches=False`, `dispatch_batches=False`**',
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                # The default (False, False) combination was already exercised above.
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs): each spawned process just runs the full test suite.
    main()


if __name__ == "__main__":
    main()
| 71
|
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort `numbers` in place in ascending order using exchange sort and return it.

    The obfuscated version assigned the swapped values to throwaway locals (so nothing
    was ever swapped) and was named `lowercase` while the script calls `exchange_sort`.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            # Exchange whenever a later element is smaller than the current one.
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print them sorted.
    # (The obfuscated version assigned `__A` twice but read `user_input`/`unsorted`.)
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
| 33
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps each submodule to the public names it provides. The
# obfuscated version reused one variable for the dict, the model list, and the lazy
# module, so `_import_structure` at the bottom was undefined and the dict was clobbered.
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 72
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """Configuration holder for `YolosImageProcessor` tests.

    Restored from an obfuscated version where the class and its methods were renamed
    (`_UpperCAmelCase`/`A`) while the test class instantiates `YolosImageProcessingTester`
    and calls `prepare_image_processor_dict` / `get_expected_values`, and where locals
    (`w`, `h`, `expected_*`) were read but never assigned.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should output for `image_inputs`.

        For a batch, each image is resized individually and the per-axis maxima are
        returned (the batch is padded to the largest image).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # Scale so the shorter side matches size["shortest_edge"], keeping aspect ratio.
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _UpperCAmelCase ( _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = YolosImageProcessor if is_vision_available() else None
def A ( self : Optional[int] ) -> Optional[int]:
lowercase_ : Optional[Any] = YolosImageProcessingTester(self )
@property
def A ( self : str ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Optional[int] ) -> List[str]:
lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
def A ( self : Dict ) -> Tuple:
lowercase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A )
lowercase_ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A )
def A ( self : Optional[int] ) -> Tuple:
pass
def A ( self : Tuple ) -> int:
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowercase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
lowercase_ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : str ) -> Any:
# Initialize image_processing
lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : int = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Optional[int] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Optional[int]:
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowercase_ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : List[str] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Optional[Any]:
# Initialize image_processings
lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
lowercase_ : Tuple = self.image_processing_class(do_resize=A , do_normalize=A , do_rescale=A )
# create random PyTorch tensors
lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowercase_ : Union[str, Any] = image_processing_a.pad(A , return_tensors='''pt''' )
lowercase_ : List[Any] = image_processing_a(A , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def A ( self : str ) -> List[Any]:
# prepare image and target
lowercase_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase_ : List[Any] = json.loads(f.read() )
lowercase_ : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
lowercase_ : Union[str, Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
lowercase_ : List[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowercase_ : Union[str, Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowercase_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowercase_ : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowercase_ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowercase_ : Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowercase_ : List[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowercase_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowercase_ : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowercase_ : List[str] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowercase_ : Optional[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def test_call_pytorch_with_coco_panoptic_annotations(self):
    """Integration check: COCO panoptic annotations are encoded into the expected tensors."""
    # prepare image, target and masks_path
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''') as f:
        target = json.loads(f.read())
    target = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
    masks_path = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
    # encode them
    image_processing = YolosImageProcessor(format='''coco_panoptic''')
    encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='''pt''')
    # verify pixel values
    expected_shape = torch.Size([1, 3, 800, 1066])
    self.assertEqual(encoding['''pixel_values'''].shape, expected_shape)
    expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
    self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], expected_slice, atol=1e-4))
    # verify area
    expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
    self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], expected_area))
    # verify boxes
    expected_boxes_shape = torch.Size([6, 4])
    self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, expected_boxes_shape)
    expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
    self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], expected_boxes_slice, atol=1e-3))
    # verify image_id
    expected_image_id = torch.tensor([39769])
    self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], expected_image_id))
    # verify is_crowd
    expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
    self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], expected_is_crowd))
    # verify class_labels
    expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
    self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], expected_class_labels))
    # verify masks
    expected_masks_sum = 822873
    self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), expected_masks_sum)
    # verify orig_size
    expected_orig_size = torch.tensor([480, 640])
    self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], expected_orig_size))
    # verify size
    expected_size = torch.tensor([800, 1066])
    self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], expected_size))
| 33
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure: submodule name -> list of public names exported from it.
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present, so the modeling objects can be exported as well
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 73
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of the
    squares of the first *n* natural numbers.

    Uses the closed forms n(n+1)/2 and n(n+1)(2n+1)/6 instead of an O(n) loop.
    """
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
    # Prints the Project Euler 6 answer for the default n.
    # NOTE(review): requires a function named `solution` above — confirm the
    # definition in this file uses that name.
    print(F"""{solution() = }""")
| 33
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last *occurrence* occurrences of *old* in *s* with *new*."""
    groups = s.rsplit(old, occurrence)
    return new.join(groups)
def count_parameters(state_dict):
    """Sum all parameter values in *state_dict*, skipping `encoder.embeddings` keys.

    encoder.embeddings are double copied in original FLAVA, so they are excluded
    to keep the before/after checksums comparable.
    """
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    """Rename dall_e VQVAE keys to the FlavaImageCodebook naming scheme and cast values to float32."""
    upgrade = {}
    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        # blocks are wrapped in a `.group` submodule on the HF side
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F'{group_key}.', F'{group_key}.group.')
        if "res_path" in key:
            key = key.replace('res_path.', 'res_path.path.')
        # dall_e uses `.w` / `.b`; HF uses `.weight` / `.bias`
        if key.endswith('.w'):
            key = rreplace(key, '.w', '.weight', 1)
        if key.endswith('.b'):
            key = rreplace(key, '.b', '.bias', 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Convert a dall_e Encoder checkpoint into a FlavaImageCodebook.

    `checkpoint_path` may be a local file or a URL. When `save_checkpoint` is True
    the converted model is written to `pytorch_dump_folder_path`; otherwise the
    converted state dict is returned.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    # sanity check: total parameter mass must be unchanged by the key renaming
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    # CLI entry point: convert a dall_e image codebook checkpoint to the HF FLAVA format.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 74
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Build an UnCLIP image-variation pipeline from a pretrained txt2img UnCLIP
    # pipeline plus a CLIP vision encoder, then save it to --dump_path.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    # Reuse every txt2img component except the text-side conditioning, which is
    # replaced by the CLIP image encoder + processor above.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
| 33
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> list of public names exported from it.
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present, so the modeling objects can be exported as well
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 75
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests (tiny random weights) for KandinskyVaaControlnetImgaImgPipeline."""

    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''in_channels''': 8,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image_hint''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            '''num_train_timesteps''': 1000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        return {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }

    def test_kandinsky_controlnet_img2img(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against a stored reference output."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''')
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        init_image = init_image.resize((512, 512))
        hint = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''')
        # depth hint as a (1, 3, H, W) float tensor in [0, 1]
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = '''A robot, 4k photo'''
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt='''''', ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type='''np''', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 33
| 0
|
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly *successes* successes in *trials*
    independent Bernoulli trials, each succeeding with probability *prob*.

    Raises ValueError when the arguments are out of the function's domain.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
    # Run doctests first, then print a worked example.
    # NOTE(review): requires a function named `binomial_distribution` above —
    # confirm the definition in this file uses that name.
    from doctest import testmod

    testmod()
    print('Probability of 2 successes out of 4 trails')
    print('with probability of 0.75 is:', end=' ')
    print(binomial_distribution(2, 4, 0.75))
| 76
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci number with *n* digits."""
    fib_prev, fib_curr = 1, 1  # F(1), F(2)
    index = 2
    while True:
        index += 1
        fib_next = fib_prev + fib_curr
        fib_prev, fib_curr = fib_curr, fib_next
        # digit counts grow by at most one per term, so >= triggers exactly
        # at the first n-digit Fibonacci number
        if len(str(fib_next)) >= n:
            return index
if __name__ == "__main__":
    # Reads n from stdin and prints the index of the first n-digit Fibonacci number.
    # NOTE(review): requires a function named `solution` above — confirm the
    # definition in this file uses that name.
    print(solution(int(str(input()).strip())))
| 33
| 0
|
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq wav2vec2 parameter prefix -> HF Wav2Vec2 parameter prefix
# ("*" is replaced by the encoder layer index at load time)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# keys that live at the top level of the HF model rather than inside the encoder
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign the fairseq tensor `value` to the HF parameter addressed by dotted `key`.

    `weight_type` selects which attribute of the resolved module receives the data
    ("weight", "weight_g", "weight_v", "bias", or None for the module itself).
    `full_name` is only used in messages.
    """
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    # shape check against the target parameter before overwriting it
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy every fairseq wav2vec2 weight into `hf_model`, logging any leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.']):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # the segment before the key holds the encoder layer index
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq `conv_layers.*` tensor into the HF feature extractor, or record it as unused."""
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        # type 0: the convolution itself
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2: per-layer layer norm (or the single group norm on layer 0)
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    """Copy one fairseq adapter/projection tensor into the HF adapter, or record it as unused."""
    name = full_name.split('adaptor.')[-1]
    items = name.split('.')
    # adapter conv layers are addressed by a numeric index; projection layers are not
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
                adapter.proj.bias.data = value
                logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
                adapter.proj.weight.data = value
                logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer whose weight tensor is tied to *emb*'s weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # share the embedding's storage so the two stay tied
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Convert a fairseq wav2vec2 + mBART speech-to-text checkpoint into a HF
    SpeechEncoderDecoderModel and save model, tokenizer and feature extractor
    to `pytorch_dump_folder_path`.
    """
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1]),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    # patch token/feature-extractor metadata into the combined config
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = 'mbart50'
    config["feature_extractor_type"] = 'wav2vec2'
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert a fairseq wav2vec2+mBART checkpoint to the HF format.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=10_24, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=25_00_04, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
| 77
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    """Configuration for a ViT-MAE model: encoder hyper-parameters plus the
    decoder used during masked-autoencoder pre-training."""

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        # fraction of patches masked during pre-training
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 33
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
# Module-level logger.
# NOTE(review): all four module constants below are bound to the same name
# `snake_case_`, so each assignment overwrites the previous one.
snake_case_ = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config URL.
snake_case_ = {
    """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}

# fmt: off
# Token ids suppressed during generation (non-speech tokens for the
# multilingual and English tokenizers, respectively — TODO confirm).
snake_case_ = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
    1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
    1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
    3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
snake_case_ = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
    1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
    2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
    4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class A_ ( PretrainedConfig ):
    """Configuration for a Whisper model (``model_type = "whisper"``).

    Defaults correspond to a small Whisper architecture (d_model=256).
    """

    # NOTE(review): the original based this class on the undefined name
    # `SCREAMING_SNAKE_CASE_` and bound all three class attributes to the same
    # name `__UpperCamelCase`; restored to the PretrainedConfig base and the
    # attribute names that the PretrainedConfig machinery actually reads.
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=5_18_65,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=15_36,
        encoder_ffn_dim=15_36,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=5_02_57,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=2_56,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=15_00,
        max_target_positions=4_48,
        pad_token_id=5_02_56,
        bos_token_id=5_02_56,
        eos_token_id=5_02_56,
        suppress_tokens=None,
        begin_suppress_tokens=[2_20, 5_02_56],
        use_weighted_layer_sum=False,
        classifier_proj_size=2_56,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        # Store every hyper-parameter on the instance (the original assigned
        # them all to the throwaway local `UpperCAmelCase`).
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for Whisper."""
    # NOTE(review): base `SCREAMING_SNAKE_CASE_` is undefined in this module
    # (OnnxSeqaSeqConfigWithPast, imported above, appears intended); this
    # class also re-uses the name `A_` of the config class right before it,
    # shadowing it at module scope.

    @property
    def UpperCAmelCase__ ( self :Any ) -> Mapping[str, Mapping[int, str]]:
        # ONNX input axes: batched log-mel features for the encoder.
        UpperCAmelCase = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            # NOTE(review): both branches rebind the local instead of adding a
            # 'decoder_input_ids' axis entry; `lowercase_` and `common_inputs`
            # below are never bound in this scope.
            UpperCAmelCase = {0: 'batch'}
        else:
            UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
        return common_inputs

    def UpperCAmelCase__ ( self :int , lowercase_ :Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ :int = -1 , lowercase_ :int = -1 , lowercase_ :bool = False , lowercase_ :Optional["TensorType"] = None , lowercase_ :int = 2_20_50 , lowercase_ :float = 5.0 , lowercase_ :int = 2_20 , ) -> Mapping[str, Any]:
        # Builds dummy encoder (audio) and decoder (token) inputs for export.
        # NOTE(review): the signature repeats the parameter name `lowercase_`
        # (illegal in Python), shadows the property of the same name above,
        # and the body reads `preprocessor`/`seq_length`/`encoder_inputs`/...
        # that are never bound; results are assigned to the throwaway name
        # `UpperCAmelCase`, so `dummy_inputs` is returned empty at best.
        UpperCAmelCase = OrderedDict()
        UpperCAmelCase = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
        UpperCAmelCase = encoder_inputs['input_features'].shape[2]
        UpperCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length
        UpperCAmelCase = super().generate_dummy_inputs(
            preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = encoder_inputs.pop('input_features' )
        UpperCAmelCase = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            UpperCAmelCase = decoder_inputs.pop('past_key_values' )
        return dummy_inputs

    @property
    def UpperCAmelCase__ ( self :Dict ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1E-3
| 78
|
"""simple docstring"""
def lowercase ( __snake_case : int ) -> int:
    """Return the `__snake_case`-th Fibonacci number (F(2) == 1).

    Returns 0 for non-integer input or for input 1 (original guard behavior
    preserved).
    """
    # NOTE(review): the original tested `isinstance(x, x)` (a runtime
    # TypeError) and read the undefined names `n` / `sequence`; restored to
    # the intended `isinstance(..., int)` guard and a consistently named list.
    if __snake_case == 1 or not isinstance(__snake_case , int ):
        return 0
    elif __snake_case == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , __snake_case + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[__snake_case]
def lowercase ( __snake_case : int ) -> int:
    """Return the index of the first Fibonacci number with at least
    `__snake_case` digits."""
    # NOTE(review): the original bound both counters to the same name
    # `lowercase_`, leaving `digits`/`index` undefined; `fibonacci` is also
    # not defined under that name in this file (the helper above is named
    # `lowercase`) — TODO confirm the intended call target.
    digits = 0
    index = 2
    while digits < __snake_case:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def lowercase ( __snake_case : int = 1_0_0_0 ):
    """Project Euler 25: index of the first Fibonacci number with
    `__snake_case` digits."""
    # NOTE(review): `fibonacci_digits_index` is not defined under that name in
    # this file (the helper above is also named `lowercase`).
    return fibonacci_digits_index(__snake_case )


if __name__ == "__main__":
    # NOTE(review): `solution` is likewise undefined here — the functions in
    # this file are all named `lowercase`.
    print(solution(int(str(input()).strip())))
| 33
| 0
|
'''simple docstring'''
class _UpperCAmelCase :
    """Disjoint-set (union-find) over sets with known sizes, tracking the size
    of the largest set after each merge.

    ``set_counts[i]`` is the current size of set ``i``; ``parents``/``ranks``
    implement union-by-rank, and lookups use path compression.
    """

    def __init__( self , set_counts ):
        # NOTE(review): the original stored these in throwaway locals (`_A`);
        # they are read back as instance attributes by the methods below.
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )

    def lowerCAmelCase ( self , src , dst ):
        """Union the sets containing `src` and `dst`.

        Returns False if they are already in the same set, True otherwise.
        """
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # Attach the lower-ranked src tree under dst.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True

    def get_parent( self , disj_set ):
        """Find the root of `disj_set`, compressing the path along the way."""
        # NOTE(review): both methods were originally named `lowerCAmelCase`
        # (the second shadowing the first); the merge method above calls
        # `self.get_parent`, grounding this name.
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
| 79
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the MobileNetV2 model files.
# NOTE(review): every assignment below targets the same name `__A`, so the
# import-structure dict and the feature-extractor/processor/model lists
# overwrite one another; `_LazyModule` at the bottom references
# `_import_structure`, which is never defined; and the TYPE_CHECKING imports
# target `*_va` modules while the structure lists V2 names.
__A : List[str] = {
    '''configuration_mobilenet_v2''': [
        '''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''MobileNetV2Config''',
        '''MobileNetV2OnnxConfig''',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Dict = ['''MobileNetV2FeatureExtractor''']
    __A : Optional[int] = ['''MobileNetV2ImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Optional[Any] = [
        '''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MobileNetV2ForImageClassification''',
        '''MobileNetV2ForSemanticSegmentation''',
        '''MobileNetV2Model''',
        '''MobileNetV2PreTrainedModel''',
        '''load_tf_weights_in_mobilenet_v2''',
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_va import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetVaConfig,
        MobileNetVaOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
        from .image_processing_mobilenet_va import MobileNetVaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_va import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetVaForImageClassification,
            MobileNetVaForSemanticSegmentation,
            MobileNetVaModel,
            MobileNetVaPreTrainedModel,
            load_tf_weights_in_mobilenet_va,
        )

else:
    import sys

    __A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _UpperCamelCase ( __A ) -> Optional[Any]:
    """Tokenize one dataset example and record its chars-per-token ratio.

    Relies on the module-level `tokenizer` created below; returns a dict with
    the token ids and the length ratio used for dataset statistics.
    """
    output = {}
    # NOTE(review): the original passed the example itself as `truncation=` and
    # re-bound the output dict on every line; full sequences are wanted here,
    # so truncation is disabled. Output keys presumed from downstream usage —
    # TODO confirm against the consumer of this dataset.
    output["input_ids"] = tokenizer(__A["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(__A["content"] ) / len(output["input_ids"] )
    return output
# Parse CLI arguments, load the dataset, tokenize it in parallel, and push
# the result to the Hub.
# NOTE(review): every statement below binds to the same name `a__`, yet the
# code then reads `parser`, `args`, `t_start`, `ds` and `tokenize`, none of
# which is ever defined under those names in this file.
a__ : Optional[Any] = HfArgumentParser(PretokenizationArguments)
a__ : Optional[int] = parser.parse_args()
if args.num_workers is None:
    # Default the worker count to all available CPUs.
    a__ : str = multiprocessing.cpu_count()
a__ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)

a__ : List[str] = time.time()
a__ : Dict = load_dataset(args.dataset_name, split='train')
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")

a__ : Union[str, Any] = time.time()
a__ : List[Any] = ds.map(
    tokenize,
    num_proc=args.num_workers,
    # Drop every metadata column so only the tokenized output remains.
    remove_columns=[
        'repo_name',
        'path',
        'copies',
        'size',
        'content',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
    ],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")

a__ : Optional[int] = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 80
|
"""simple docstring"""
from __future__ import annotations
# Sample input and its expected "next greatest element" output, used by the
# timing/doctest block at the bottom of the file.
# NOTE(review): both arrays are bound to the same name `__A`, so the expected
# array overwrites the input.
__A : List[Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__A : str = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase ( __snake_case : list[float] ) -> list[float]:
    """For each element, return the next element to its right that is
    strictly greater, or -1 if none exists. Brute force, O(n^2).
    """
    # NOTE(review): the original shadowed the result list with `len(arr)` and
    # iterated `range(arr)`; restored distinct local names.
    result = []
    arr_size = len(__snake_case )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if __snake_case[i] < __snake_case[j]:
                next_element = __snake_case[j]
                break
        result.append(next_element )
    return result
def lowercase ( __snake_case : list[float] ) -> list[float]:
    """Slice-based variant of the next-greatest-element search (still O(n^2),
    but avoids explicit index arithmetic)."""
    # NOTE(review): the original read the undefined name `arr` and appended
    # the whole input list instead of the found element.
    result = []
    for i, outer in enumerate(__snake_case ):
        next_element: float = -1
        for inner in __snake_case[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def lowercase ( __snake_case : list[float] ) -> list[float]:
    """Monotonic-stack variant of the next-greatest-element search, O(n).

    Walks the array right-to-left, keeping a stack of candidates that are
    strictly greater than everything processed so far.
    """
    # NOTE(review): the original bound `stack` and `result` to the same name
    # and then read the undefined names `stack`/`arr`; restored distinct locals.
    arr_size = len(__snake_case )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            # Pop every candidate that is not strictly greater than the
            # current element.
            while stack[-1] <= __snake_case[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(__snake_case[index] )
    return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    # NOTE(review): this block calls `next_greatest_element_slow` /
    # `next_greatest_element_fast` / `next_greatest_element`, but the three
    # functions above are all named `lowercase`; likewise `arr` and `setup`
    # were assigned to `__A`, so none of these names resolves at runtime.
    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    # timeit setup string importing the functions under test.
    __A : int = (
        '''from __main__ import arr, next_greatest_element_slow, '''
        '''next_greatest_element_fast, next_greatest_element'''
    )
    print(
        '''next_greatest_element_slow():''',
        timeit('''next_greatest_element_slow(arr)''', setup=setup),
    )
    print(
        '''next_greatest_element_fast():''',
        timeit('''next_greatest_element_fast(arr)''', setup=setup),
    )
    print(
        ''' next_greatest_element():''',
        timeit('''next_greatest_element(arr)''', setup=setup),
    )
| 33
| 0
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
# Enable info-level logging for the conversion script and grab a logger.
logging.set_verbosity_info()
lowerCamelCase_ : str = logging.get_logger()
@dataclass
class __A :
    """Records which leaf submodules of `module` actually run during a
    forward pass, via temporary forward hooks."""

    # NOTE(review): the original bound the fields to `42` / the undefined
    # `_SCREAMING_SNAKE_CASE`, referenced `nn.Convad`/`nn.BatchNormad`
    # (non-existent), and used mismatched lambda parameter names; field names
    # restored from the `self.traced`/`self.handles`/`self.module` reads below.
    module: nn.Module
    traced: list = field(default_factory=list )
    handles: list = field(default_factory=list )

    def _forward_hook( self , m , inputs , outputs ):
        # Record leaves only; conv/batch-norm layers count even though they
        # technically contain themselves as a submodule.
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )

    def __call__( self , x ):
        # Hook every submodule, run the forward pass, then detach the hooks.
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda m : len(list(m.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __A :
    """Copies weights from `src` to `dest` by aligning the traced,
    parametrized operations of both modules in execution order."""

    # NOTE(review): field names restored from the `self.src`/`self.dest`/
    # `self.verbose`/`self.src_skip`/`self.dest_skip` reads in the body; the
    # original bound them to `42` and the undefined `_SCREAMING_SNAKE_CASE`.
    # `Tracker` is referenced below as in the original body, although the
    # tracker class in this file is itself named `__A` — TODO confirm.
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: list = field(default_factory=list )
    dest_skip: list = field(default_factory=list )

    def __call__( self , x ):
        """Run `x` through both modules and transfer state dicts pairwise.

        Raises Exception if the two traces contain different operation counts.
        """
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                f''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''' )
def _A ( name , config , save_directory , push_to_hub = True ):
    """Instantiate a timm ResNet and the matching HF ResNet, copy the weights,
    sanity-check the logits, and optionally push model + image processor to
    the Hub.
    """
    # NOTE(review): the original repeated the parameter name `lowercase` four
    # times (illegal) and bound every intermediate to `a`; names restored
    # from the body's reads (`save_directory`, `checkpoint_name`, ...).
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 2_24, 2_24) )
        module_transfer(x )

    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."

    checkpoint_name = f'''resnet{"-".join(name.split("resnet" ) )}'''
    print(checkpoint_name )

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=True , )
        print(f'''Pushed {checkpoint_name}''' )
def _A ( save_directory , model_name = None , push_to_hub = True ):
    """Build ResNet configs with ImageNet-1k labels and convert either the
    requested checkpoint or every known one.

    Returns (config, expected_shape); `config` is whichever config was
    converted last (original behavior preserved).
    """
    # NOTE(review): the original repeated the parameter name `lowercase`,
    # bound every intermediate to `a`, and mapped labels with `int(lowercase)`
    # instead of `int(k)`; restored from the body's reads.
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # Pre-bind the label metadata so each config below only needs architecture args.
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
        '''resnet18''': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='''basic''' ),
        '''resnet26''': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
        '''resnet34''': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='''basic''' ),
        '''resnet50''': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
        '''resnet101''': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
        '''resnet152''': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
    }
    # NOTE(review): `convert_weight_and_push` is referenced as in the original,
    # although the helper above is itself named `_A` — TODO confirm.
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    # NOTE(review): the parser and parsed args are bound to `lowerCamelCase_`,
    # yet `parser`/`args` are read below, and `convert_weights_and_push` is
    # not defined under that name in this file (the functions above are both
    # named `_A`).
    lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
            """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )
    lowerCamelCase_ : Dict = parser.parse_args()
    lowerCamelCase_ : Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 81
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the NLLB tokenizers.
# NOTE(review): the slow- and fast-tokenizer lists are both bound to the name
# `__A` (the second overwrites the first, as does the `_LazyModule` binding at
# the bottom), and `_LazyModule` references `_import_structure`, which is
# never defined in this file.
__A : Union[str, Any] = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Dict = ['''NllbTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Dict = ['''NllbTokenizerFast''']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    __A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make torch/cudnn deterministic so test outputs are reproducible.
enable_full_determinism()
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Fast tests for StableUnCLIPImg2ImgPipeline built from tiny dummy components."""
    # NOTE(review): the three `lowerCamelCase__` bases are undefined in this
    # module (the imported Pipeline*TesterMixin classes appear intended); all
    # class attributes are bound to the single name `__lowerCamelCase`, every
    # method is named `snake_case` (each definition shadowing the previous),
    # and method bodies bind results to the throwaway local `_lowerCAmelCase`
    # while later reading descriptive names (`embedder_hidden_size`,
    # `feature_extractor`, `generator`, `inputs`, ...) that were never bound.
    __lowerCamelCase = StableUnCLIPImgaImgPipeline
    __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __lowerCamelCase = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __lowerCamelCase = frozenset([] )

    def snake_case ( self ):
        """Build the dummy pipeline components used by the fast tests."""
        _lowerCAmelCase = 32
        _lowerCAmelCase = embedder_hidden_size
        # image encoding components
        _lowerCAmelCase = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        _lowerCAmelCase = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=_snake_case , projection_dim=_snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        _lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_snake_case )
        _lowerCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
        torch.manual_seed(0 )
        _lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        torch.manual_seed(0 )
        _lowerCAmelCase = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=_snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        _lowerCAmelCase = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_snake_case , layers_per_block=1 , upcast_attention=_snake_case , use_linear_projection=_snake_case , )
        torch.manual_seed(0 )
        _lowerCAmelCase = DDIMScheduler(
            beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=_snake_case , steps_offset=1 , )
        torch.manual_seed(0 )
        _lowerCAmelCase = AutoencoderKL()
        _lowerCAmelCase = {
            # image encoding components
            """feature_extractor""": feature_extractor,
            """image_encoder""": image_encoder.eval(),
            # image noising components
            """image_normalizer""": image_normalizer.eval(),
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder.eval(),
            """unet""": unet.eval(),
            """scheduler""": scheduler,
            """vae""": vae.eval(),
        }
        return components

    def snake_case ( self , _snake_case , _snake_case=0 , _snake_case=True ):
        """Create deterministic inputs: a seeded generator plus a random image."""
        # NOTE(review): the signature repeats `_snake_case` (illegal in Python).
        if str(_snake_case ).startswith("""mps""" ):
            _lowerCAmelCase = torch.manual_seed(_snake_case )
        else:
            _lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
        _lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
        if pil_image:
            _lowerCAmelCase = input_image * 0.5 + 0.5
            _lowerCAmelCase = input_image.clamp(0 , 1 )
            _lowerCAmelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            _lowerCAmelCase = DiffusionPipeline.numpy_to_pil(_snake_case )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def snake_case ( self ):
        """Smoke-test a 2-step CPU run against a reference output slice."""
        _lowerCAmelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase = self.get_dummy_components()
        _lowerCAmelCase = StableUnCLIPImgaImgPipeline(**_snake_case )
        _lowerCAmelCase = sd_pipe.to(_snake_case )
        sd_pipe.set_progress_bar_config(disable=_snake_case )
        _lowerCAmelCase = self.get_dummy_inputs(_snake_case )
        inputs.update({"""image_embeds""": None} )
        _lowerCAmelCase = sd_pipe(**_snake_case ).images
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _lowerCAmelCase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def snake_case ( self ):
        """Attention-slicing consistency check (exact match only on cpu/mps)."""
        _lowerCAmelCase = torch_device in ["""cpu""", """mps"""]
        self._test_attention_slicing_forward_pass(test_max_difference=_snake_case )

    def snake_case ( self ):
        """Batch-of-one vs. single-sample consistency check."""
        _lowerCAmelCase = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=_snake_case )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def snake_case ( self ):
        """xFormers attention consistency check (CUDA only)."""
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for StableUnCLIPImg2ImgPipeline."""
    # NOTE(review): re-uses the class name `__lowerCAmelCase` of the fast-test
    # class above (shadowing it at module scope); every method is again named
    # `snake_case`, and bodies bind to the throwaway `_lowerCAmelCase` while
    # reading names (`pipe`, `image`, `output`, `mem_bytes`) never bound here.

    def snake_case ( self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self ):
        """l-variant img2img checked against a reference numpy output."""
        _lowerCAmelCase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        _lowerCAmelCase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
        _lowerCAmelCase = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
        pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _lowerCAmelCase = pipe(_snake_case , """anime turle""" , generator=_snake_case , output_type="""np""" )
        _lowerCAmelCase = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_snake_case , _snake_case )

    def snake_case ( self ):
        """h-variant img2img checked against a reference numpy output."""
        _lowerCAmelCase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        _lowerCAmelCase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
        _lowerCAmelCase = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
        pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _lowerCAmelCase = pipe(_snake_case , """anime turle""" , generator=_snake_case , output_type="""np""" )
        _lowerCAmelCase = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_snake_case , _snake_case )

    def snake_case ( self ):
        """Check peak GPU memory stays under 7 GB with offloading enabled."""
        _lowerCAmelCase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        _lowerCAmelCase = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
        _lowerCAmelCase = pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _lowerCAmelCase = pipe(
            _snake_case , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
        _lowerCAmelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 82
|
"""simple docstring"""
def lowercase ( __snake_case : int ) -> int:
    """Return the sum of the proper divisors of `__snake_case`.

    Raises:
        ValueError: if the input is not an integer or is not positive.
    """
    # NOTE(review): the original tested `isinstance(x, x)` (a runtime
    # TypeError) and read the undefined name `input_num` in the body.
    if not isinstance(__snake_case , int ):
        raise ValueError('''Input must be an integer''' )
    if __snake_case <= 0:
        raise ValueError('''Input must be positive''' )
    # Proper divisors other than n itself never exceed n // 2.
    return sum(
        divisor for divisor in range(1 , __snake_case // 2 + 1 ) if __snake_case % divisor == 0 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 33
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): both constants below are bound to the same name `snake_case_`,
# so the config-archive map overwrites the logger binding.
snake_case_ : int = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config URL.
snake_case_ : Union[str, Any] = {
    'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
    'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowercase__ ( PretrainedConfig ):
    """Configuration for a MobileNetV1 model (``model_type = "mobilenet_v1"``)."""

    # NOTE(review): the original base class `lowercase` was undefined in this
    # module and the parameter name `lowerCamelCase__` was repeated (illegal);
    # restored to the PretrainedConfig base imported above and to the
    # parameter names taken from the body's attribute assignments.
    model_type = """mobilenet_v1"""

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,                    # use TensorFlow-style "SAME" padding
        classifier_dropout_prob=0.9_9_9,
        initializer_range=0.0_2,
        layer_norm_eps=0.0_0_1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )

        # Store every hyper-parameter on the instance (the original assigned
        # them all to the throwaway local `_UpperCamelCase`).
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class lowercase__ ( lowercase ):
    """ONNX export configuration for MobileNetV1."""
    # NOTE(review): base `lowercase` is undefined in this module (OnnxConfig,
    # imported above, appears intended); this class re-uses the name
    # `lowercase__` of the config class right before it, and its three methods
    # all share the name `UpperCamelCase_`, so only the last definition (the
    # tolerance property) survives on the class.

    # minimum torch.onnx version, presumably — TODO confirm
    lowercase__ = version.parse("""1.11""" )

    @property
    def UpperCamelCase_ ( self : str ):
        # ONNX graph inputs: batched pixel values.
        return OrderedDict([('pixel_values', {0: 'batch'})] )

    @property
    def UpperCamelCase_ ( self : Optional[Any] ):
        # ONNX graph outputs depend on the task the model is exported for.
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )

    @property
    def UpperCamelCase_ ( self : Optional[int] ):
        # Absolute tolerance used when validating the exported model.
        return 1E-4
| 83
|
"""simple docstring"""
def lowercase ( __snake_case : Optional[int] ):
lowercase_ : int = 0
lowercase_ : Optional[Any] = len(__snake_case )
for i in range(n - 1 ):
for j in range(i + 1 , __snake_case ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def lowercase ( __snake_case : str ):
    """Count inversions with a merge-sort style divide and conquer (O(n log n)).

    Returns:
        A tuple ``(sorted_sequence, inversion_count)``.
    """

    def _merge_count(p, q):
        # Merge two sorted lists, counting cross inversions: when an element
        # of ``q`` precedes the remaining elements of ``p``, every one of
        # those remaining elements forms an inversion with it.
        merged = []
        count = 0
        i = j = 0
        while i < len(p) and j < len(q):
            if p[i] > q[j]:
                count += len(p) - i
                merged.append(q[j])
                j += 1
            else:
                merged.append(p[i])
                i += 1
        merged.extend(p[i:] if i < len(p) else q[j:])
        return merged, count

    # Base case: a sequence of length 0 or 1 has no inversions.
    if len(__snake_case ) <= 1:
        return __snake_case, 0
    # Fix: the degraded body called undefined sibling names and discarded its
    # own results; the recursion and merge step are inlined here so the
    # function is self-contained.
    mid = len(__snake_case ) // 2
    left, left_count = lowercase(__snake_case[0:mid] )
    right, right_count = lowercase(__snake_case[mid:] )
    merged, cross_count = _merge_count(left, right)
    return merged, left_count + right_count + cross_count
def lowercase ( p , q ):
    """Merge two sorted sequences and count cross inversions between them.

    Args:
        p: sorted sequence whose elements originally came first.
        q: sorted sequence whose elements originally came second.

    Returns:
        A tuple ``(merged_sorted_list, num_cross_inversions)``.
    """
    # Fix: the degraded signature declared both parameters with the same
    # placeholder name (a SyntaxError) and never bound ``i``/``j``/``r``;
    # names are restored from the read sites in the body.
    r = []
    i = j = 0
    num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    # Append whichever side still has elements left.
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
def lowercase ( ):
    # Demo driver comparing the brute-force and recursive inversion counters.
    # NOTE(review): this body reads ``count_inversions_bf``,
    # ``count_inversions_recursive``, ``num_inversions_bf``, ``arr_a`` and
    # ``__snake_case``, none of which are defined in this file (the counters
    # above are all named ``lowercase``), and the repeated ``lowercase_``
    # bindings discard each result — presumably renaming damage; restore the
    # original names before running. ``main()`` in the guard below is
    # likewise undefined.
    lowercase_ : Union[str, Any] = [1_0, 2, 1, 5, 5, 2, 1_1]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    lowercase_ : int = count_inversions_bf(__snake_case )
    lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''' , __snake_case )
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    lowercase_ : Dict = count_inversions_bf(__snake_case )
    lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , __snake_case )
    # an empty list should also have zero inversions
    lowercase_ : List[Any] = []
    lowercase_ : Any = count_inversions_bf(__snake_case )
    lowercase_ , lowercase_ : List[str] = count_inversions_recursive(__snake_case )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , __snake_case )
if __name__ == "__main__":
    main()
| 33
| 0
|
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
    # Unit tests for the Pegasus tokenizer (slow and fast variants).
    # NOTE(review): helper locals were renamed to ``lowerCAmelCase_`` while
    # later statements still read the original names (``tokenizer``,
    # ``rust_tokenizer``, ``batch`` ...), and ``__A`` in ``setUp`` is an
    # undefined fixture path (presumably the SentencePiece sample vocab) —
    # confirm against the upstream file before running. Base class ``A__``
    # is likewise undefined here.
    UpperCAmelCase_ :Tuple = PegasusTokenizer
    UpperCAmelCase_ :List[Any] = PegasusTokenizerFast
    UpperCAmelCase_ :Optional[int] = True
    UpperCAmelCase_ :Union[str, Any] = True
    def __lowerCAmelCase ( self ) -> Tuple:
        # Build a tokenizer from the SentencePiece fixture and save it so the
        # other tests can reload it from ``self.tmpdirname``.
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase_ :Optional[int] = PegasusTokenizer(__A )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def __lowerCAmelCase ( self ) -> int:
        # Pretrained tokenizer used by the integration-style tests below.
        return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
    def __lowerCAmelCase ( self , **__A ) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **__A )
    def __lowerCAmelCase ( self , __A ) -> List[Any]:
        return ("This is a test", "This is a test")
    def __lowerCAmelCase ( self ) -> int:
        # ``</s>`` should round-trip through id 1.
        lowerCAmelCase_ :Any = """</s>"""
        lowerCAmelCase_ :Optional[int] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
    def __lowerCAmelCase ( self ) -> int:
        # Spot-check the vocabulary layout and size of the fixture tokenizer.
        lowerCAmelCase_ :Any = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<pad>""" )
        self.assertEqual(vocab_keys[1] , """</s>""" )
        self.assertEqual(vocab_keys[-1] , """v""" )
        self.assertEqual(len(__A ) , 1103 )
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        # Slow and fast tokenizers must agree on special-token handling.
        lowerCAmelCase_ :Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase_ :Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase_ :Optional[Any] = (
            """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
            """ </s> <pad> <pad> <pad>"""
        )
        lowerCAmelCase_ :List[str] = rust_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0]
        lowerCAmelCase_ :List[Any] = py_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0]
        self.assertListEqual(__A , __A )
    def __lowerCAmelCase ( self ) -> Optional[int]:
        lowerCAmelCase_ :Dict = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        lowerCAmelCase_ :int = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
        lowerCAmelCase_ :Any = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        lowerCAmelCase_ :Dict = tokenizer([raw_input_str] , return_tensors=__A ).input_ids[0]
        self.assertListEqual(__A , __A )
    def __lowerCAmelCase ( self ) -> str:
        # Invariants of the published google/pegasus-large tokenizer.
        lowerCAmelCase_ :int = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        lowerCAmelCase_ :Dict = """To ensure a smooth flow of bank resolutions."""
        lowerCAmelCase_ :List[str] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        lowerCAmelCase_ :Union[str, Any] = tokenizer([raw_input_str] , return_tensors=__A ).input_ids[0]
        self.assertListEqual(__A , __A )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def __lowerCAmelCase ( self ) -> Tuple:
        # Padding/truncation shapes for an over-long source and short targets.
        lowerCAmelCase_ :Dict = ["""This is going to be way too long.""" * 150, """short example"""]
        lowerCAmelCase_ :int = ["""not super long but more than 5 tokens""", """tiny"""]
        lowerCAmelCase_ :str = self._large_tokenizer(__A , padding=__A , truncation=__A , return_tensors="""pt""" )
        lowerCAmelCase_ :Dict = self._large_tokenizer(
            text_target=__A , max_length=5 , padding=__A , truncation=__A , return_tensors="""pt""" )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(__A ) == 2 # input_ids, attention_mask.
    @slow
    def __lowerCAmelCase ( self ) -> List[Any]:
        # Full integration check against a pinned revision on the Hub.
        # fmt: off
        lowerCAmelCase_ :Dict = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
    # Tests for the BigBird-Pegasus variant of the tokenizer (offset=0 and a
    # single "[MASK]" token instead of <mask_1>/<mask_2>).
    # NOTE(review): same renaming damage as the class above — locals are bound
    # to ``lowerCAmelCase_`` while later lines read the original names, and
    # ``__A`` in ``setUp`` is an undefined fixture path; confirm before
    # running.
    UpperCAmelCase_ :Optional[Any] = PegasusTokenizer
    UpperCAmelCase_ :Any = PegasusTokenizerFast
    UpperCAmelCase_ :Optional[Any] = True
    UpperCAmelCase_ :List[Any] = True
    def __lowerCAmelCase ( self ) -> List[str]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase_ :Any = PegasusTokenizer(__A , offset=0 , mask_token_sent=__A , mask_token="""[MASK]""" )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def __lowerCAmelCase ( self ) -> int:
        # Pretrained tokenizer used by the integration-style tests below.
        return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
    def __lowerCAmelCase ( self , **__A ) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **__A )
    def __lowerCAmelCase ( self , __A ) -> Tuple:
        return ("This is a test", "This is a test")
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # Slow and fast tokenizers must agree on special-token handling.
        lowerCAmelCase_ :Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase_ :Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase_ :int = (
            """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
            """ <pad> <pad> <pad>"""
        )
        lowerCAmelCase_ :int = rust_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0]
        lowerCAmelCase_ :List[Any] = py_tokenizer([raw_input_str] , return_tensors=__A , add_special_tokens=__A ).input_ids[0]
        self.assertListEqual(__A , __A )
    @require_torch
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        # BigBird-Pegasus pads/truncates to a 4096-token context.
        lowerCAmelCase_ :Optional[Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
        lowerCAmelCase_ :List[str] = ["""not super long but more than 5 tokens""", """tiny"""]
        lowerCAmelCase_ :Any = self._large_tokenizer(__A , padding=__A , truncation=__A , return_tensors="""pt""" )
        lowerCAmelCase_ :Any = self._large_tokenizer(
            text_target=__A , max_length=5 , padding=__A , truncation=__A , return_tensors="""pt""" )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(__A ) == 2 # input_ids, attention_mask.
    def __lowerCAmelCase ( self ) -> List[str]:
        # Regression test against ids produced by the original TF implementation.
        lowerCAmelCase_ :Dict = (
            """This is an example string that is used to test the original TF implementation against the HF"""
            """ implementation"""
        )
        lowerCAmelCase_ :Any = self._large_tokenizer(__A ).input_ids
        self.assertListEqual(
            __A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 84
|
"""simple docstring"""
# Pin table: canonical pip requirement string for each dependency name, used
# when assembling this package's install requirements and extras.
__A : Any = {
    '''Pillow''': '''Pillow''',
    '''accelerate''': '''accelerate>=0.11.0''',
    '''compel''': '''compel==0.1.8''',
    '''black''': '''black~=23.1''',
    '''datasets''': '''datasets''',
    '''filelock''': '''filelock''',
    '''flax''': '''flax>=0.4.1''',
    '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
    '''huggingface-hub''': '''huggingface-hub>=0.13.2''',
    '''requests-mock''': '''requests-mock==1.10.0''',
    '''importlib_metadata''': '''importlib_metadata''',
    '''invisible-watermark''': '''invisible-watermark''',
    '''isort''': '''isort>=5.5.4''',
    '''jax''': '''jax>=0.2.8,!=0.3.2''',
    '''jaxlib''': '''jaxlib>=0.1.65''',
    '''Jinja2''': '''Jinja2''',
    '''k-diffusion''': '''k-diffusion>=0.0.12''',
    '''torchsde''': '''torchsde''',
    '''note_seq''': '''note_seq''',
    '''librosa''': '''librosa''',
    '''numpy''': '''numpy''',
    '''omegaconf''': '''omegaconf''',
    '''parameterized''': '''parameterized''',
    '''protobuf''': '''protobuf>=3.20.3,<4''',
    '''pytest''': '''pytest''',
    '''pytest-timeout''': '''pytest-timeout''',
    '''pytest-xdist''': '''pytest-xdist''',
    '''ruff''': '''ruff>=0.0.241''',
    '''safetensors''': '''safetensors''',
    '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
    '''scipy''': '''scipy''',
    '''onnx''': '''onnx''',
    '''regex''': '''regex!=2019.12.17''',
    '''requests''': '''requests''',
    '''tensorboard''': '''tensorboard''',
    '''torch''': '''torch>=1.4''',
    '''torchvision''': '''torchvision''',
    '''transformers''': '''transformers>=4.25.1''',
    '''urllib3''': '''urllib3<=2.0.0''',
}
| 33
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
    # Fast CPU checks for the KarrasVe pipeline built from a tiny dummy UNet.
    # NOTE(review): every local here was renamed to the placeholder
    # ``snake_case_`` while later statements still read the original names
    # (``model``, ``pipe``, ``image`` ...), so these methods raise NameError
    # as written; ``a__`` is likewise an undefined placeholder argument.
    # Restore the real bindings before running.
    @property
    def lowerCAmelCase__ ( self ) -> Any:
        '''Return a small UNet2D model seeded for determinism.'''
        torch.manual_seed(0 )
        snake_case_ = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def lowerCAmelCase__ ( self ) -> Optional[Any]:
        '''Run the pipeline with dict and tuple outputs and compare image slices.'''
        snake_case_ = self.dummy_uncond_unet
        snake_case_ = KarrasVeScheduler()
        snake_case_ = KarrasVePipeline(unet=a__ , scheduler=a__ )
        pipe.to(a__ )
        pipe.set_progress_bar_config(disable=a__ )
        snake_case_ = torch.manual_seed(0 )
        snake_case_ = pipe(num_inference_steps=2 , generator=a__ , output_type="numpy" ).images
        snake_case_ = torch.manual_seed(0 )
        snake_case_ = pipe(num_inference_steps=2 , generator=a__ , output_type="numpy" , return_dict=a__ )[0]
        snake_case_ = image[0, -3:, -3:, -1]
        snake_case_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        snake_case_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        # Both output paths must match the expected slice to within 1e-2.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
    # Slow integration test: a full 256x256 sample from a pretrained NCSN++
    # checkpoint.
    # NOTE(review): same renaming damage as the fast test above — locals are
    # all bound to ``snake_case_`` while ``pipe`` / ``image`` are read later,
    # and ``a__`` is an undefined placeholder argument.
    def lowerCAmelCase__ ( self ) -> int:
        '''Generate one image with the pretrained model and check a pixel slice.'''
        snake_case_ = "google/ncsnpp-celebahq-256"
        snake_case_ = UNetaDModel.from_pretrained(a__ )
        snake_case_ = KarrasVeScheduler()
        snake_case_ = KarrasVePipeline(unet=a__ , scheduler=a__ )
        pipe.to(a__ )
        pipe.set_progress_bar_config(disable=a__ )
        snake_case_ = torch.manual_seed(0 )
        snake_case_ = pipe(num_inference_steps=20 , generator=a__ , output_type="numpy" ).images
        snake_case_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        snake_case_ = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 85
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding for the Mega model: symbols are only materialized on
# first attribute access via ``_LazyModule``.
# NOTE(review): in this degraded copy both the import-structure dict and the
# torch-only model list were renamed to ``__A`` (the second assignment clobbers
# the first), and ``_import_structure`` passed to ``_LazyModule`` below is
# therefore undefined — restore the original names before importing.
__A : List[Any] = {
    '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
    # Torch-only symbols are only registered when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : List[str] = [
        '''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MegaForCausalLM''',
        '''MegaForMaskedLM''',
        '''MegaForMultipleChoice''',
        '''MegaForQuestionAnswering''',
        '''MegaForSequenceClassification''',
        '''MegaForTokenClassification''',
        '''MegaModel''',
        '''MegaPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports directly.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys
    # At runtime the module object is replaced by a lazy proxy.
    __A : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
    # Fast tests for the UnCLIP image-variation pipeline built from small
    # dummy components.
    # NOTE(review): locals in this class are bound to the placeholder
    # ``__lowerCAmelCase`` while later statements read the original names
    # (``tokenizer``, ``model``, ``pipe``, ``image`` ...), and base class
    # ``_lowerCamelCase`` (presumably a pipeline tester mixin) is undefined
    # in this file — restore the real bindings before running.
    A_ : Optional[int] = UnCLIPImageVariationPipeline
    A_ : Optional[int] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    A_ : Dict = IMAGE_VARIATION_BATCH_PARAMS
    A_ : Dict = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    A_ : Dict = False
    # --- fixture dimensions for the dummy components -----------------------
    @property
    def __lowerCamelCase ( self ):
        return 32
    @property
    def __lowerCamelCase ( self ):
        return 32
    @property
    def __lowerCamelCase ( self ):
        return self.time_input_dim
    @property
    def __lowerCamelCase ( self ):
        return self.time_input_dim * 4
    @property
    def __lowerCamelCase ( self ):
        return 1_00
    # --- dummy model fixtures (seeded for determinism) ---------------------
    @property
    def __lowerCamelCase ( self ):
        __lowerCAmelCase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
    @property
    def __lowerCamelCase ( self ):
        torch.manual_seed(0 )
        __lowerCAmelCase : int = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModelWithProjection(_SCREAMING_SNAKE_CASE )
    @property
    def __lowerCamelCase ( self ):
        torch.manual_seed(0 )
        __lowerCAmelCase : Tuple = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
        return CLIPVisionModelWithProjection(_SCREAMING_SNAKE_CASE )
    @property
    def __lowerCamelCase ( self ):
        torch.manual_seed(0 )
        __lowerCAmelCase : List[str] = {
            'clip_embeddings_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'cross_attention_dim': self.cross_attention_dim,
        }
        __lowerCAmelCase : Tuple = UnCLIPTextProjModel(**_SCREAMING_SNAKE_CASE )
        return model
    @property
    def __lowerCamelCase ( self ):
        torch.manual_seed(0 )
        __lowerCAmelCase : Optional[int] = {
            'sample_size': 32,
            # RGB in channels
            'in_channels': 3,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 6,
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': 'identity',
        }
        __lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
        return model
    @property
    def __lowerCamelCase ( self ):
        # Shared kwargs for both super-resolution UNets below.
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }
    @property
    def __lowerCamelCase ( self ):
        torch.manual_seed(0 )
        __lowerCAmelCase : List[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
        return model
    @property
    def __lowerCamelCase ( self ):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1 )
        __lowerCAmelCase : Dict = UNetaDModel(**self.dummy_super_res_kwargs )
        return model
    def __lowerCamelCase ( self ):
        # Assemble the full component dict the pipeline constructor expects.
        __lowerCAmelCase : str = self.dummy_decoder
        __lowerCAmelCase : int = self.dummy_text_proj
        __lowerCAmelCase : int = self.dummy_text_encoder
        __lowerCAmelCase : Any = self.dummy_tokenizer
        __lowerCAmelCase : List[Any] = self.dummy_super_res_first
        __lowerCAmelCase : List[str] = self.dummy_super_res_last
        __lowerCAmelCase : Dict = UnCLIPScheduler(
            variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
        __lowerCAmelCase : Optional[Any] = UnCLIPScheduler(
            variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
        __lowerCAmelCase : Dict = CLIPImageProcessor(crop_size=32 , size=32 )
        __lowerCAmelCase : Any = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=True ):
        # Build the common call kwargs; optionally convert the random tensor
        # to a PIL image first.
        __lowerCAmelCase : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
        if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
            __lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
        else:
            __lowerCAmelCase : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
        if pil_image:
            __lowerCAmelCase : List[str] = input_image * 0.5 + 0.5
            __lowerCAmelCase : Optional[int] = input_image.clamp(0 , 1 )
            __lowerCAmelCase : Optional[int] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            __lowerCAmelCase : Any = DiffusionPipeline.numpy_to_pil(_SCREAMING_SNAKE_CASE )[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def __lowerCamelCase ( self ):
        # Tensor input: dict and tuple outputs must match a known slice.
        __lowerCAmelCase : str = 'cpu'
        __lowerCAmelCase : Optional[int] = self.get_dummy_components()
        __lowerCAmelCase : Tuple = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : int = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = output.images
        __lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[int] = pipe(
            **_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
        __lowerCAmelCase : Any = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase : Any = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def __lowerCamelCase ( self ):
        # PIL-image input variant of the test above.
        __lowerCAmelCase : Tuple = 'cpu'
        __lowerCAmelCase : Any = self.get_dummy_components()
        __lowerCAmelCase : Tuple = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : str = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : str = pipe(**_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Union[str, Any] = output.images
        __lowerCAmelCase : Any = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[Any] = pipe(
            **_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
        __lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase : Union[str, Any] = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def __lowerCamelCase ( self ):
        # Batched (two-image) input variant.
        __lowerCAmelCase : Optional[Any] = 'cpu'
        __lowerCAmelCase : Optional[int] = self.get_dummy_components()
        __lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : int = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[int] = [
            pipeline_inputs['image'],
            pipeline_inputs['image'],
        ]
        __lowerCAmelCase : Dict = pipe(**_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[int] = output.images
        __lowerCAmelCase : List[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = [
            tuple_pipeline_inputs['image'],
            tuple_pipeline_inputs['image'],
        ]
        __lowerCAmelCase : List[Any] = pipe(
            **_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
        __lowerCAmelCase : Any = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        __lowerCAmelCase : Union[str, Any] = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def __lowerCamelCase ( self ):
        # Passing a precomputed image embedding must match the image pathway.
        __lowerCAmelCase : Union[str, Any] = torch.device('cpu' )
        # NOTE(review): this nested class is referenced below as
        # ``DummyScheduler()`` but carries a degraded name — presumably a
        # stub scheduler with a single class attribute; confirm upstream.
        class A__ :
            A_ : Any = 1
        __lowerCAmelCase : Any = self.get_dummy_components()
        __lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
        __lowerCAmelCase : Any = pipe.decoder.dtype
        __lowerCAmelCase : Union[str, Any] = 1
        __lowerCAmelCase : Dict = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        __lowerCAmelCase : List[Any] = pipe.prepare_latents(
            _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
        __lowerCAmelCase : Union[str, Any] = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        __lowerCAmelCase : int = pipe.prepare_latents(
            _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
        __lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Any = pipe(
            **_SCREAMING_SNAKE_CASE , decoder_latents=_SCREAMING_SNAKE_CASE , super_res_latents=_SCREAMING_SNAKE_CASE ).images
        __lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
        # Don't pass image, instead pass embedding
        __lowerCAmelCase : List[str] = pipeline_inputs.pop('image' )
        __lowerCAmelCase : Optional[Any] = pipe.image_encoder(_SCREAMING_SNAKE_CASE ).image_embeds
        __lowerCAmelCase : Union[str, Any] = pipe(
            **_SCREAMING_SNAKE_CASE , decoder_latents=_SCREAMING_SNAKE_CASE , super_res_latents=_SCREAMING_SNAKE_CASE , image_embeddings=_SCREAMING_SNAKE_CASE , ).images
        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_a - img_out_a ).max() < 1E-4
    @skip_mps
    def __lowerCamelCase ( self ):
        __lowerCAmelCase : int = torch_device == 'cpu'
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        __lowerCAmelCase : int = 1E-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=_SCREAMING_SNAKE_CASE )
    @skip_mps
    def __lowerCamelCase ( self ):
        __lowerCAmelCase : Union[str, Any] = torch_device == 'cpu'
        __lowerCAmelCase : List[Any] = True
        __lowerCAmelCase : Optional[Any] = [
            'decoder_num_inference_steps',
            'super_res_num_inference_steps',
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE , )
    def __lowerCamelCase ( self ):
        __lowerCAmelCase : List[Any] = [
            'decoder_num_inference_steps',
            'super_res_num_inference_steps',
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            __lowerCAmelCase : str = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=_SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE , )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE )
    @skip_mps
    def __lowerCamelCase ( self ):
        return super().test_dict_tuple_outputs_equivalent()
    @skip_mps
    def __lowerCamelCase ( self ):
        return super().test_save_load_local()
    @skip_mps
    def __lowerCamelCase ( self ):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
    # GPU integration test for the pretrained Karlo image-variation pipeline.
    # NOTE(review): same renaming damage as above — locals are bound to
    # ``__lowerCAmelCase`` while ``pipeline`` / ``image`` are read later.
    def __lowerCamelCase ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __lowerCamelCase ( self ):
        # Generate a variation of a reference cat image and compare against a
        # stored expected output (mean pixel difference within 15).
        __lowerCAmelCase : Optional[int] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
        __lowerCAmelCase : str = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
        __lowerCAmelCase : int = UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
        __lowerCAmelCase : List[str] = pipeline.to(_SCREAMING_SNAKE_CASE )
        pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
        __lowerCAmelCase : List[str] = pipeline(
            _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type='np' , )
        __lowerCAmelCase : int = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 15 )
| 86
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
# NOTE(review): in the original all four constants were bound to the same name `__A`,
# shadowing each other, while the functions below reference REPLACE_PATTERNS /
# REPLACE_FILES; restored to distinct names matching those references.

# Root folder of the example scripts whose pinned version is updated on release.
PATH_TO_EXAMPLES = "examples/"
# Per-file-kind (regex, replacement template); "VERSION" is substituted at run time.
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files that carry the version string, keyed by the pattern kind used to rewrite them.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README = "README.md"
def lowercase(fname, version, pattern):
    """Rewrite the version string matched by ``REPLACE_PATTERNS[pattern]`` inside `fname`.

    Args:
        fname: path of the file to update in place.
        version: new version string substituted for "VERSION" in the template.
        pattern: key into the module-level REPLACE_PATTERNS mapping.

    NOTE(review): the original signature repeated one parameter name three times
    (a SyntaxError); parameter names restored from how the body uses each value.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def lowercase ( __snake_case : int ):
    """Walk a directory tree and bump the pinned version inside every example script.

    NOTE(review): scrambled — upstream this walks the examples-path constant and takes the
    new version string as its argument; here the single parameter is used as the walk root,
    as both halves of the joined path, and as the version. Needs repair before use.
    """
    for folder, directories, fnames in os.walk(__snake_case ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                # NOTE(review): `update_version_in_file` is not defined under that name in
                # this file (every helper here is named `lowercase`).
                update_version_in_file(os.path.join(__snake_case , __snake_case ) , __snake_case , pattern='''examples''' )
def lowercase(version, patch=False):
    """Update the version everywhere it is pinned: the files in REPLACE_FILES and,
    unless this is a patch release, every example script.

    NOTE(review): the original signature repeated one parameter name twice (a
    SyntaxError); also `update_version_in_file` / `update_version_in_examples` are not
    defined under those names in this file (every helper here is named `lowercase`).
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def lowercase():
    """Rewrite 'main' model-doc links in the README's architecture list to stable links.

    NOTE(review): restored from a scrambled original whose locals (`_start_prompt`,
    `start_index`, `index`, the README path) were never bound under the names that were
    read; `README` is expected to be the module-level constant pointing at "README.md".
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the model list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    # Update the lines in the model list until the end prompt.
    index = start_index
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def lowercase():
    """Read the current version out of the main ``__init__.py`` and return it parsed.

    NOTE(review): restored from a scrambled original that read an unbound local;
    relies on the module-level REPLACE_FILES / REPLACE_PATTERNS constants.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def lowercase(patch=False):
    """Do all the necessary pre-release steps: compute the next minor (or patch)
    version, confirm it interactively, and update it across the repo.

    NOTE(review): restored from a scrambled original whose locals were never bound
    under the names that were read; `get_version`, `global_version_update` and
    `clean_main_ref_in_model_list` are not defined under those names in this file
    (every helper here is named `lowercase`).
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def lowercase():
    """Do all the necessary post-release steps: bump to the next ``.dev0`` version,
    confirm it interactively, and update it across the repo.

    NOTE(review): restored from a scrambled original whose locals were never bound
    under the names that were read; `get_version`, `global_version_update` and
    `clean_main_ref_in_model_list` are not defined under those names in this file.
    """
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # NOTE(review): in the original, the parser and the parsed args were both bound to
    # `__A` while the code read `parser` / `args`; restored so the CLI actually works.
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    # NOTE(review): `pre_release_work` / `post_release_work` are not defined under those
    # names in this file (every helper above is named `lowercase`).
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 33
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both assignments below bind the same name `UpperCamelCase`, so the logger
# is immediately shadowed by the checkpoint map; upstream these are `logger` and
# `VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP`.
UpperCamelCase = logging.get_logger(__name__)
# Map from released VisualBERT checkpoint names to their hosted config files.
UpperCamelCase = {
    '''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
    '''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-vqa-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
    ),
    '''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
    '''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-vcr-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
    ),
    '''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
    '''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-nlvr2-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class snake_case_ ( PretrainedConfig ):
    r"""Configuration for VisualBERT models.

    Holds the text-encoder hyper-parameters plus ``visual_embedding_dim``, the size of
    the visual embeddings that are fused with the token embeddings.

    NOTE(review): restored from a scrambled original whose base class (`__A`) was
    undefined and whose __init__ repeated one parameter name for every argument (a
    SyntaxError). Parameter names/defaults follow the attribute assignments in the body;
    they match the released VisualBertConfig.
    """

    # Identifier used by the auto-config machinery.
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 87
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowercase(repo_id, path, revision=None):
    """Return the resolved Hub URL for `path` inside dataset repository `repo_id`.

    NOTE(review): the original signature repeated one parameter name three times (a
    SyntaxError); parameter roles restored from the hfh.hf_hub_url call.
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
| 33
| 0
|
import os
# Roman numeral symbol values. NOTE(review): originally bound to `__lowerCAmelCase`
# while the parser below reads the name SYMBOLS; renamed to match that usage.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def a__(A_):
    """Parse a Roman numeral string into its integer value.

    Uses the standard subtractive rule: a symbol smaller than its successor is
    subtracted, otherwise added. Returns 0 for an empty string.

    NOTE(review): restored from a scrambled original whose locals (`index`,
    `total_value`, ...) were read without ever being bound; the symbol table is
    kept local so the function does not depend on a mis-named module constant.
    """
    symbols = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    if not A_:
        return 0
    total_value = 0
    index = 0
    while index < len(A_) - 1:
        current_value = symbols[A_[index]]
        next_value = symbols[A_[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += symbols[A_[index]]
    return total_value
def a__(A_):
    """Convert a non-negative integer into its minimal Roman numeral string.

    Handles the subtractive forms (CM/CD, XC/XL, IX/IV) explicitly at each decimal
    place. Returns "" for 0.

    NOTE(review): restored from a scrambled original whose locals (`numerals`,
    `m_count`, `c_count`, `x_count`) were read without ever being bound.
    """
    numerals = ""
    m_count = A_ // 1000
    numerals += m_count * "M"
    A_ %= 1000
    c_count = A_ // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    A_ %= 100
    x_count = A_ // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    A_ %= 10
    if A_ == 9:
        numerals += "IX"
        A_ -= 9
    elif A_ == 4:
        numerals += "IV"
        A_ -= 4
    if A_ >= 5:
        numerals += "V"
        A_ -= 5
    numerals += A_ * "I"
    return numerals
def a__ ( A_ = "/p089_roman.txt" ):
    """Project Euler 89: characters saved by rewriting each numeral minimally.

    NOTE(review): scrambled — `savings` is read but the initial 0 is bound to
    `__magic_name__`; `roman_numerals_filename`, `parse_roman_numerals` and
    `generate_roman_numerals` are undefined here because every helper in this file
    is named `a__`. Non-functional as-is.
    """
    __magic_name__ = 0
    with open(os.path.dirname(A_ ) + roman_numerals_filename ) as filea:
        __magic_name__ = filea.readlines()
    for line in lines:
        __magic_name__ = line.strip()
        __magic_name__ = parse_roman_numerals(A_ )
        __magic_name__ = generate_roman_numerals(A_ )
        savings += len(A_ ) - len(A_ )
    return savings


if __name__ == "__main__":
    # NOTE(review): `solution` is undefined — the function above is named `a__`.
    print(F'''{solution() = }''')
| 88
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase:
    """Builds tiny ResNet configs and inputs for the TF ResNet model tests.

    NOTE(review): restored from a scrambled original whose __init__ repeated the
    parameter name `A` for every argument (a SyntaxError), whose methods were all
    named `A`, and whose locals were never bound under the names that were read.
    Method names restored to what the sibling test class calls
    (prepare_config_and_inputs, get_config, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=None,
        depths=None,
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        # Avoid mutable default arguments: fall back to the standard tiny config.
        self.hidden_sizes = [10, 20, 30, 40] if hidden_sizes is None else hidden_sizes
        self.depths = [1, 1, 2, 1] if depths is None else depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(self.depths)

    def prepare_config_and_inputs(self):
        # Random pixel values (and labels, if used) sized by the tiny config.
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( _A , _A , unittest.TestCase ):
    # NOTE(review): scrambled block — the two `_A` bases are undefined (upstream:
    # TFModelTesterMixin, PipelineTesterMixin), every attribute below rebinds the single
    # name SCREAMING_SNAKE_CASE_, every method is named `A` (later defs shadow earlier
    # ones), bare `A` inside bodies is an unresolved placeholder, and locals are bound
    # to `lowercase_` while later lines read their upstream names. Non-functional as-is;
    # kept verbatim with comments only.

    SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE_ : List[Any] = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # Upstream these five flags are distinct (test_pruning / test_resize_embeddings /
    # test_head_masking / test_onnx / has_attentions), all False for ResNet.
    SCREAMING_SNAKE_CASE_ : Optional[int] = False
    SCREAMING_SNAKE_CASE_ : Optional[int] = False
    SCREAMING_SNAKE_CASE_ : str = False
    SCREAMING_SNAKE_CASE_ : Optional[int] = False
    SCREAMING_SNAKE_CASE_ : Any = False

    def A ( self : Union[str, Any] ) -> List[Any]:
        # setUp: build the model tester and the config tester.
        # NOTE(review): `TFResNetModelTester` is undefined — the tester class above is
        # named `_UpperCAmelCase`.
        lowercase_ : int = TFResNetModelTester(self )
        lowercase_ : str = ConfigTester(self , config_class=A , has_text_modality=A )

    def A ( self : Dict ) -> Optional[Any]:
        # Run the standard configuration round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A ( self : Dict ) -> List[Any]:
        # create_and_test_config_common_properties is a no-op for ResNet.
        return

    @unittest.skip(reason='''ResNet does not use inputs_embeds''' )
    def A ( self : Any ) -> Any:
        pass

    @unittest.skip(reason='''ResNet does not support input and output embeddings''' )
    def A ( self : List[str] ) -> Optional[Any]:
        pass

    def A ( self : str ) -> Tuple:
        # The model's call signature must start with `pixel_values`.
        lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : int = model_class(A )
            lowercase_ : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : str = [*signature.parameters.keys()]
            lowercase_ : str = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , A )

    def A ( self : List[str] ) -> Tuple:
        # Forward-pass shape check via the model tester.
        lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )

    def A ( self : List[Any] ) -> List[str]:
        # Hidden-state count/shape checks for both basic and bottleneck layer types.
        def check_hidden_states_output(A : Union[str, Any] , A : int , A : List[Any] ):
            lowercase_ : int = model_class(A )
            lowercase_ : Optional[Any] = model(**self._prepare_for_class(A , A ) )
            lowercase_ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase_ : Any = self.model_tester.num_stages
            self.assertEqual(len(A ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ : Union[str, Any] = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase_ : List[str] = layer_type
                lowercase_ : Tuple = True
                check_hidden_states_output(A , A , A )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase_ : Optional[Any] = True
                check_hidden_states_output(A , A , A )

    def A ( self : Optional[int] ) -> Tuple:
        # Image-classification head check via the model tester.
        lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )

    @slow
    def A ( self : List[str] ) -> Optional[int]:
        # Loading the first released checkpoint must succeed.
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ : Tuple = TFResNetModel.from_pretrained(A )
            self.assertIsNotNone(A )
def lowercase():
    """Load the standard COCO cats fixture image used by the integration test.

    NOTE(review): the original bound the opened image to a throwaway local and
    returned the unbound name `image`; restored.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    # Slow integration test: TF ResNet classification on the COCO cats image.
    # NOTE(review): scrambled — locals are bound to `lowercase_` but read under their
    # upstream names (`image_processor`, `model`, `outputs`), bare `A` is an unresolved
    # placeholder, and `prepare_img` is not defined under that name in this file
    # (the helper above is named `lowercase`). Non-functional as-is.

    @cached_property
    def A ( self : Any ) -> Optional[int]:
        # Default image processor for the first released TF ResNet checkpoint.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def A ( self : Any ) -> Optional[int]:
        # End-to-end forward pass; checks logits shape and a three-value slice.
        lowercase_ : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        lowercase_ : List[Any] = self.default_image_processor
        lowercase_ : Dict = prepare_img()
        lowercase_ : List[str] = image_processor(images=A , return_tensors='''tf''' )
        # forward pass
        lowercase_ : Tuple = model(**A )
        # verify the logits
        lowercase_ : Optional[int] = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , A )
        lowercase_ : Optional[Any] = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A , atol=1e-4 ) )
| 33
| 0
|
'''simple docstring'''
import string
def __lowerCamelCase(lowerCAmelCase_) -> None:
    """Brute-force a Caesar cipher: print the decryption of the message under every
    possible key (0-25). Non-uppercase characters pass through unchanged.

    NOTE(review): restored from a scrambled original that iterated the unbound name
    `message`, looked up the whole message instead of each symbol, and read
    `num`/`translated` without binding them.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in lowerCAmelCase_:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol) - key
                if num < 0:
                    num += len(string.ascii_uppercase)
                translated += string.ascii_uppercase[num]
            else:
                translated += symbol
        print(f"""Decryption using Key #{key}: {translated}""")
def __lowerCamelCase ( ) -> None:
    """Prompt for an encrypted message and print all candidate decryptions.

    NOTE(review): scrambled — the input is bound to `_a` but read as `message`, the
    decrypt argument `lowerCAmelCase_` is unbound, and `decrypt` is not defined under
    that name in this file (both helpers are named `__lowerCamelCase`). Raises
    NameError if run.
    """
    _a : int = input('Encrypted message: ' )
    _a : Tuple = message.upper()
    decrypt(lowerCAmelCase_ )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `main` is undefined — the entry-point function above is named
    # `__lowerCamelCase`, so running this module raises NameError.
    main()
| 89
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Fixture passage used by the text-question-answering tool tests below.
# NOTE(review): inside a class body this constant cannot be referenced as `__A`
# (double-underscore names are mangled); upstream it is a module constant named TEXT.
__A : Dict = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class _UpperCAmelCase ( unittest.TestCase , _A ):
    # NOTE(review): scrambled block — the mixin base `_A` is undefined (upstream:
    # ToolTesterMixin, imported above), every method is named `A` so later defs shadow
    # earlier ones, the load_tool results are bound to throwaway locals instead of
    # self.tool / self.remote_tool, and the bare `A` passed to the tool calls is an
    # unresolved placeholder (upstream: the TEXT fixture / remote=True / the call
    # result). Non-functional as-is; kept verbatim with comments only.

    def A ( self : List[Any] ) -> Dict:
        # setUp: instantiate the local and remote text-question-answering tools.
        lowercase_ : Optional[int] = load_tool('''text-question-answering''' )
        self.tool.setup()
        lowercase_ : Union[str, Any] = load_tool('''text-question-answering''' , remote=A )

    def A ( self : Any ) -> List[str]:
        # Exact-match answer, positional args, local tool.
        lowercase_ : Union[str, Any] = self.tool(A , '''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )

    def A ( self : str ) -> List[str]:
        # Exact-match answer, positional args, remote tool.
        lowercase_ : int = self.remote_tool(A , '''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )

    def A ( self : List[Any] ) -> int:
        # Exact-match answer, keyword args, local tool.
        lowercase_ : Optional[Any] = self.tool(text=A , question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )

    def A ( self : List[str] ) -> Optional[int]:
        # Exact-match answer, keyword args, remote tool.
        lowercase_ : int = self.remote_tool(text=A , question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )
| 33
| 0
|
from itertools import permutations
def lowerCamelCase_ ( UpperCamelCase__ : tuple ) -> bool:
    """Return True iff the 0-9 pandigital digit tuple has Project Euler 43's
    substring-divisibility property (d2d3d4 % 2, d3d4d5 % 3, d4d5d6 % 5,
    then the remaining three-digit windows by 7, 11, 13, 17)."""
    digits = UpperCamelCase__
    # d2d3d4 divisible by 2 <=> its last digit d4 is even.
    if digits[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3 <=> its digit sum is divisible by 3.
    if (digits[2] + digits[3] + digits[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5 <=> d6 is 0 or 5.
    if digits[5] % 5 != 0:
        return False
    # Sliding three-digit windows checked against 7, 11, 13, 17.
    return all(
        (digits[i + 4] * 100 + digits[i + 5] * 10 + digits[i + 6]) % divisor == 0
        for i, divisor in enumerate((7, 11, 13, 17))
    )
def lowerCamelCase_ ( UpperCamelCase__ : int = 10 ) -> int:
    """Project Euler 43: sum all 0-9 pandigital numbers with the substring property.

    NOTE(review): scrambled — `map(UpperCamelCase__ , UpperCamelCase__ )` should map
    `str` over the digit tuple `num`, and `is_substring_divisible` is undefined here
    because both helpers in this file share the name `lowerCamelCase_`.
    """
    return sum(
        int(''.join(map(UpperCamelCase__ , UpperCamelCase__ ) ) )
        for num in permutations(range(UpperCamelCase__ ) )
        if is_substring_divisible(UpperCamelCase__ ) )


if __name__ == "__main__":
    # NOTE(review): `solution` is undefined — the function above is named `lowerCamelCase_`.
    print(f'''{solution() = }''')
| 90
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _UpperCAmelCase(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Arrow-to-torch formatter: converts extracted rows/columns/batches into
    torch tensors, recursing through nested lists/arrays.

    NOTE(review): restored from a scrambled original whose __init__ repeated the
    parameter name (`A=None, **A` — a SyntaxError), never stored
    ``torch_tensor_kwargs`` on self, used nonexistent ``torch.intaa`` /
    ``torch.floataa`` dtypes, and whose methods were all named `A` while bodies
    called the real names (`recursive_tensorize`, `_consolidate`, ...).
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # Extra kwargs forwarded to every torch.tensor(...) call (e.g. device).
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        # Stack a list of same-shape/same-dtype tensors into one tensor; leave
        # ragged or mixed lists untouched.
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        # Convert a leaf value to a torch tensor, picking sensible default dtypes.
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # Caller-supplied kwargs win over the inferred default dtype.
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 33
| 0
|
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
# NOTE(review): in the original all three docstring constants were bound to the same
# name `UpperCAmelCase_`, shadowing each other, while the metric class below references
# _DESCRIPTION / _KWARGS_DESCRIPTION / _CITATION; restored to those names. String
# contents are unchanged.

# Metric description shown to users.
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

# Argument / return documentation injected into the metric docstring.
_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric(\"f1\")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric(\"f1\")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric(\"f1\")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

# BibTeX citation for scikit-learn.
_CITATION = """
@article{scikit-learn,
    title={Scikit-learn: Machine Learning in {P}ython},
    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
            and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
            and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
            Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
    journal={Journal of Machine Learning Research},
    volume={12},
    pages={2825--2830},
    year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase__(datasets.Metric):
    """F1 metric backed by ``sklearn.metrics.f1_score``.

    NOTE(review): restored from a scrambled original whose ``_compute`` repeated one
    parameter name for every argument (a SyntaxError) and whose methods were both
    named ``_SCREAMING_SNAKE_CASE`` (datasets requires ``_info`` / ``_compute``).
    """

    def _info(self):
        # Multilabel configs take sequences of ints; single-label configs take ints.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        # sklearn's signature is (y_true, y_pred, ...), so references come first.
        # NOTE(review): `fa_score` matches this file's (broken) sklearn import; the
        # real sklearn name is `f1_score`.
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 91
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33
| 0
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class a__:
    """Builds tiny TrOCR decoder configs and inputs for the standalone decoder tests.

    NOTE(review): restored from a scrambled original whose __init__ repeated the
    parameter name `_A` for every argument (a SyntaxError), stored nothing on self,
    and whose methods were all named `A`/`__SCREAMING_SNAKE_CASE`. Attribute and
    method names follow the upstream TrOCRStandaloneDecoderModelTester.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        # Avoid pad tokens so the cache comparison is well-defined.
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standalone tests for the TrOCR decoder.

    The obfuscated original named every method ``__SCREAMING_SNAKE_CASE`` (so each
    ``def`` shadowed the previous one and unittest could discover nothing) and
    every class attribute ``_a``; original names are restored.
    """

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        # was: TrOCRStandaloneDecoderModelTester(self, is_training=_A) with _A undefined
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
# --- file boundary (concatenation artifact removed) ---
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# Module-level names restored: the obfuscated original bound everything to `__A`,
# while later code references `logger`, `_CONFIG_FOR_DOC`, `_CHECKPOINT_FOR_DOC`,
# `_EXPECTED_OUTPUT_SHAPE`, `_IMAGE_CLASS_CHECKPOINT` and `_IMAGE_CLASS_EXPECTED_OUTPUT`.
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF checkpoint variable-name prefixes to PyTorch parameters.

    The obfuscated original collapsed every ``tf_to_pt_map[prefix + key] = ...``
    assignment into a plain local assignment, so the returned dict stayed empty;
    the key structure of the MobileNetV1 TF checkpoint layout is restored here.
    """
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    # Stem convolution + batch norm.
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    # 13 depthwise-separable blocks: one TF block maps to two PyTorch conv layers.
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        # Classification head (1x1 conv in the TF graph, linear layer here).
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoint weights into a PyTorch MobileNetV1 model.

    The obfuscated original gave all three parameters the same name (a
    SyntaxError) and lost the local names used below; both are restored.
    This name is referenced by the PreTrainedModel subclass later in the file.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            # TF depthwise kernels are (H, W, in, multiplier); PyTorch wants (in*mult, 1, H, W)-style ordering.
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # Drop the variable and its optimizer slots so the final report lists only leftovers.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a 4D feature map for `conv_layer`.

    Restores the local names the obfuscated original destroyed (it computed
    `pad_along_*` into a throwaway name and then read the original names).
    This name is referenced by the conv layer's forward pass below.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    # TF "SAME" padding: total pad depends on whether the dimension divides the stride.
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    # Split the total pad, putting the extra pixel (if any) on the right/bottom like TF.
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    """Convolution + optional batch norm + optional activation, with optional TF "SAME" padding.

    Class name restored (it is referenced by the model builder below); the
    obfuscated original also repeated the parameter name `A` (a SyntaxError) and
    used the non-existent `nn.Convad` / `nn.BatchNormad` — fixed to the real
    `nn.Conv2d` / `nn.BatchNorm2d`.
    """

    def __init__(
        self,
        config: "MobileNetVaConfig",
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: "bool | str" = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # With TF padding the conv itself is unpadded; padding is applied in forward().
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """Base class handling weight init and the pretrained load interface.

    The obfuscated original named all five configuration attributes
    `SCREAMING_SNAKE_CASE_` (each shadowing the previous); restored here.
    `base_model_prefix` matches the `mobilenet_va` attribute used by the
    classification head in this file.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_va"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize weights: normal(0, initializer_range) for linear/conv, identity batch norm."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Docstring constant names restored: both were bound to `__A` in the obfuscated
# original while the decorators below need the standard names.
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone: conv stem + 13 depthwise-separable blocks + optional pooling.

    Class name restored (the classification head below instantiates it by this
    name); broken locals and decorator arguments are also restored.
    """

    def __init__(self, config: "MobileNetVaConfig", add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            # Channel count doubles at every strided block (and after the stem).
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 conv ...
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # ... followed by a pointwise 1x1 conv.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        # MobileNetV1 has no attention heads to prune.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone + dropout + linear classification head.

    Local and attribute names restored from the obfuscated original (which
    assigned the backbone/head to throwaway names while reading the real ones).
    """

    def __init__(self, config: "MobileNetVaConfig") -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type from label count/dtype when not set explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
# --- file boundary (concatenation artifact removed) ---
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for RoFormer (slow + fast tokenizers on Chinese text).

    Restored from the obfuscated original, where every method was named
    `_snake_case` (each def shadowing the previous one, so unittest could only
    ever see the last) and all four class flags were named `lowerCAmelCase_`.
    """

    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib for RoFormer
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib for RoFormer
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
# --- file boundary (concatenation artifact removed) ---
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort `numbers` in place (ascending) using exchange sort and return it.

    Fixes the obfuscated original, which passed the *list* to `range()` instead
    of its length and referenced parameter/locals that no longer existed.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            # Swap whenever a later element is smaller than the anchor.
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
# --- file boundary (concatenation artifact removed) ---
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: submodule name -> public names it exposes. The
# obfuscated original bound this dict (and the modeling-name list, and the
# _LazyModule instance) to the same throwaway name, so `_import_structure` was
# never populated and `sys.modules` was never patched.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# --- file boundary (concatenation artifact removed) ---
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """Holds image-processor hyperparameters and the expected output-size math.

    Class name restored (the test class below instantiates it by this name);
    the obfuscated original assigned hyperparameters to locals instead of
    `self.*` and collapsed the expected-size locals. The original mutable list
    defaults are replaced by the None-default idiom (backward compatible).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should produce for `image_inputs`."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # Scale the shorter edge to size["shortest_edge"], keep aspect ratio.
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            # Batched outputs are padded to the max height/width in the batch.
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class _UpperCAmelCase ( _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = YolosImageProcessor if is_vision_available() else None
def A ( self : Optional[int] ) -> Optional[int]:
lowercase_ : Optional[Any] = YolosImageProcessingTester(self )
@property
def A ( self : str ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Optional[int] ) -> List[str]:
lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
def A ( self : Dict ) -> Tuple:
lowercase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A )
lowercase_ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A )
def A ( self : Optional[int] ) -> Tuple:
pass
def A ( self : Tuple ) -> int:
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowercase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
lowercase_ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : str ) -> Any:
# Initialize image_processing
lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : int = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Optional[int] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Optional[int]:
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowercase_ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : List[str] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Optional[Any]:
# Initialize image_processings
lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
lowercase_ : Tuple = self.image_processing_class(do_resize=A , do_normalize=A , do_rescale=A )
# create random PyTorch tensors
lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowercase_ : Union[str, Any] = image_processing_a.pad(A , return_tensors='''pt''' )
lowercase_ : List[Any] = image_processing_a(A , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def A ( self : str ) -> List[Any]:
# prepare image and target
lowercase_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase_ : List[Any] = json.loads(f.read() )
lowercase_ : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
lowercase_ : Union[str, Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
lowercase_ : List[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowercase_ : Union[str, Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowercase_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowercase_ : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowercase_ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowercase_ : Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowercase_ : List[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowercase_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowercase_ : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowercase_ : List[str] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowercase_ : Optional[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def A ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
lowercase_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowercase_ : str = json.loads(f.read() )
lowercase_ : int = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
lowercase_ : List[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowercase_ : int = YolosImageProcessor(format='''coco_panoptic''' )
lowercase_ : Any = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowercase_ : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowercase_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowercase_ : List[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowercase_ : str = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowercase_ : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowercase_ : List[str] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowercase_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowercase_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowercase_ : Dict = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowercase_ : Tuple = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowercase_ : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 33
| 0
|
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
a__ : Optional[int] =[1]
for i in range(2 , SCREAMING_SNAKE_CASE ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
a__ : int =[]
a__ : Optional[Any] =list(range(SCREAMING_SNAKE_CASE ) )
# Find permutation
while factorials:
a__ : int =factorials.pop()
a__ , a__ : int =divmod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 95
|
"""simple docstring"""
def lowercase(__snake_case: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first ``__snake_case`` natural numbers.

    Fix: the original accumulated into names that were never defined
    (NameError). Also replaced the O(n) loop with the closed-form sums.
    """
    n = __snake_case
    # sum(1..n) and sum(1^2..n^2) via the standard closed forms.
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
    # Fix NameError: the solver defined in this module is `lowercase`, not `solution`.
    print(f"{lowercase() = }")
| 33
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Map of submodule name -> list of public names; consumed lazily by
# _LazyModule below so heavy backends are only imported on first access.
# Fix: the optional-dependency branches previously REBOUND the whole dict
# instead of adding keys, and the final _LazyModule call referenced an
# undefined `_import_structure`.
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are lazy.
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; submodules import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 96
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
__A : str = parser.parse_args()
__A : List[Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
__A : Dict = CLIPImageProcessor()
__A : Union[str, Any] = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
__A : List[str] = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 33
| 0
|
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
# Fix: all three module constants were named `__snake_case` (each rebinding
# the last), while the decorator and `_info` referenced `_DESCRIPTION`,
# `_KWARGS_DESCRIPTION` and `_CITATION`; restore the intended names.
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                     predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results[\'matthews_correlation\'], 2))
        0.54
    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                     predictions=[1, 2, 2, 0, 3, 3],
        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results[\'matthews_correlation\'], 2))
        0.1
    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                     predictions=[1, 2, 2, 0, 3, 3],
        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results[\'matthews_correlation\'], 2))
        -0.25
'''

_CITATION = '''\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """Matthews correlation coefficient metric, backed by scikit-learn.

    Fix: `datasets.Metric` dispatches to `_info` / `_compute`; the obfuscated
    methods were both named `lowerCAmelCase__` and `_compute` declared
    duplicate parameter names (a SyntaxError).
    """

    def _info(self):
        """Describe the metric's inputs, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        """Return {"matthews_correlation": float} for the given labels."""
        # sklearn's signature is (y_true, y_pred, ...), i.e. references first.
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 97
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ``KandinskyVaaControlnetImgaImgPipeline`` with dummy weights.

    Fix: every property/method was named ``A`` (so only the last survived),
    ``get_dummy_inputs`` declared duplicate parameters (a SyntaxError), the
    mixin parent ``_A`` was undefined, and method bodies referenced names that
    were never bound. Intended names are recovered from the surviving
    ``self.xxx`` references and the ``PipelineTesterMixin`` contract.
    """

    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny, deterministic conditional UNet for fast tests."""
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        """Return the unet/scheduler/movq dict the pipeline constructor expects."""
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)

        return {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic pipeline inputs on *device* from *seed*."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        # torch.Generator does not support the MPS backend; fall back to the global seed.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        return {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }

    def test_kandinsky_controlnet_img2img(self):
        """The pipeline output matches a pinned slice, with and without return_dict."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _UpperCAmelCase(unittest.TestCase):
    """GPU integration test for the Kandinsky 2.2 controlnet img2img pipeline.

    Fix: both methods were named ``A`` (so tearDown was shadowed and the test
    never collected), a tuple assignment carried an annotation (SyntaxError),
    and locals such as ``pipe_prior``/``pipeline`` were never bound.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        # HWC uint8 image -> normalized NCHW float tensor expected by the controlnet.
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 33
| 0
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def a_(files, tmp_path_factory):
    """DatasetInfosDict.from_directory reads both README.md and dataset_infos.json.

    Fix: the original declared both parameters with the same name (a
    SyntaxError) while the body used the fixture names ``files`` and
    ``tmp_path_factory``.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def a_(dataset_info, tmp_path):
    """Write a DatasetInfo to disk and reload it unchanged.

    Fix: duplicate parameter names (SyntaxError); restore the parametrized
    ``dataset_info`` and the pytest ``tmp_path`` fixture.
    """
    output_dir = str(tmp_path)
    dataset_info.write_to_directory(output_dir)
    reloaded = DatasetInfo.from_directory(output_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(output_dir, "dataset_info.json"))
def a_():
    """A fully-populated DatasetInfo round-trips through its YAML dict form.

    Fix: the original referenced undefined names (``lowerCamelCase``) and
    never bound the intermediate results it asserted on.
    """
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # YAML-serializable scalar/containers only
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def a_():
    """A default-constructed DatasetInfo serializes to an empty YAML dict."""
    empty_info = DatasetInfo()
    assert empty_info._to_yaml_dict() == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def a_(dataset_infos_dict, tmp_path):
    """Round-trip a DatasetInfosDict through write_to_directory/from_directory.

    Fix: duplicate parameter names (SyntaxError) and loop results bound to a
    throwaway name instead of back into the dict under comparison.
    """
    output_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(output_dir)
    reloaded = DatasetInfosDict.from_directory(output_dir)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(output_dir, "README.md"))
| 98
|
"""simple docstring"""
def lowercase(__snake_case: int = 1_0_0_0) -> int:
    """Project Euler 25: index of the first Fibonacci number with n digits.

    Fix: the original annotated a tuple-unpacking target (a SyntaxError) and
    every local collapsed to one name, leaving ``fa``/``f`` undefined.
    F(1) = F(2) = 1, so the search starts at index 2.
    """
    fib_prev, fib_curr = 1, 1
    index = 2
    # Advance until the current Fibonacci number reaches n digits.
    while len(str(fib_curr)) < __snake_case:
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        index += 1
    return index
if __name__ == "__main__":
    # Fix NameError: the solver defined in this module is `lowercase`, not `solution`.
    print(lowercase(int(str(input()).strip())))
| 33
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase : List[str] = logging.get_logger(__name__)
@dataclass
class A__(BenchmarkArguments):
    """
    PyTorch-specific benchmark arguments.

    Extends ``BenchmarkArguments`` with torchscript tracing, XLA/TPU metric
    printing and an Apex fp16 optimization level, and migrates the deprecated
    negative ``no_*`` flags to their positive counterparts.

    Fixes vs the obfuscated original: the parent class and the three dataclass
    fields had colliding placeholder names, ``__init__`` passed the kwargs dict
    itself to ``setattr`` and referenced undefined names in its warning, and
    ``deprecated_args`` carried an annotation that would make it a dataclass
    field with a mutable default (ValueError at class creation).
    """

    # Plain class attribute (deliberately unannotated so @dataclass ignores it).
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        Accept deprecated ``no_*`` keyword arguments for backward compatibility,
        warn, convert them to the positive attribute, then delegate to the
        parent dataclass ``__init__``.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={getattr(self, positive_arg)}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Resolve the torch device and GPU count once (cached)."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 99
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _UpperCAmelCase(PretrainedConfig):
    """
    Configuration for a ViT-MAE model; defaults mirror ``facebook/vit-mae-base``.

    Fix: the original ``__init__`` declared all nineteen parameters with the
    same name ``A`` (a SyntaxError) and inherited from an undefined ``_A``;
    parameter names are recovered from the attribute assignments in the body.
    """

    # PretrainedConfig dispatches on this identifier.
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 33
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    """Builds configs and dummy inputs for DecisionTransformerModel tests.

    Fix: the class was renamed to match its use site
    (``DecisionTransformerModelTester(self)``), ``__init__`` and
    ``create_and_check_model`` declared duplicate parameter names (a
    SyntaxError), and all four methods shared one obfuscated name; intended
    method names are recovered from the calling test class.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Random trajectories shaped (batch, seq, dim) plus a matching config."""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        # seq length * 3 as there are 3 modalities: states, returns and actions
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-test harness for DecisionTransformerModel.

    Fix: the mixin parents were the undefined name ``__a``, all class flags
    shared one obfuscated name (so only the last survived), and every method
    was named ``snake_case_`` — pytest/unittest only run ``test_*`` methods
    and the mixins only read the real attribute names restored below.
    """

    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration test: two steps of autoregressive action prediction
    with the pretrained gym-hopper-expert Decision Transformer.

    NOTE(review): every local is the same rebound name
    ``__SCREAMING_SNAKE_CASE`` and later lines read names (``model``,
    ``state``, ``actions``, ``rewards``, ``timesteps``, ``action_pred``,
    ``returns_to_go``, ...) never bound under those names; ``lowerCAmelCase__``
    is also undefined. As written this would raise NameError — the comments
    below describe the evident intent only; confirm against upstream.
    """

    @slow
    def snake_case_ ( self):
        __SCREAMING_SNAKE_CASE = 2 # number of steps of autoregressive prediction we will perform
        __SCREAMING_SNAKE_CASE = 1_0 # defined by the RL environment, may be normalized
        __SCREAMING_SNAKE_CASE = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""")
        __SCREAMING_SNAKE_CASE = model.to(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = model.config
        torch.manual_seed(0)  # reproducible random initial state
        __SCREAMING_SNAKE_CASE = torch.randn(1 , 1 , config.state_dim).to(device=lowerCAmelCase__ , dtype=torch.floataa) # env.reset()
        # Expected action predictions for the two autoregressive steps.
        __SCREAMING_SNAKE_CASE = torch.tensor(
            [[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = torch.tensor(lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=torch.floataa).reshape(1 , 1 , 1)
        __SCREAMING_SNAKE_CASE = state
        # Empty action/reward histories; timestep counter starts at 0.
        __SCREAMING_SNAKE_CASE = torch.zeros(1 , 0 , config.act_dim , device=lowerCAmelCase__ , dtype=torch.floataa)
        __SCREAMING_SNAKE_CASE = torch.zeros(1 , 0 , device=lowerCAmelCase__ , dtype=torch.floataa)
        __SCREAMING_SNAKE_CASE = torch.tensor(0 , device=lowerCAmelCase__ , dtype=torch.long).reshape(1 , 1)

        for step in range(lowerCAmelCase__):
            # Append zero placeholders for the action/reward to be predicted.
            __SCREAMING_SNAKE_CASE = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowerCAmelCase__)] , dim=1)
            __SCREAMING_SNAKE_CASE = torch.cat([rewards, torch.zeros(1 , 1 , device=lowerCAmelCase__)] , dim=1)
            __SCREAMING_SNAKE_CASE = torch.ones(1 , states.shape[1]).to(dtype=torch.long , device=states.device)

            with torch.no_grad():
                __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = model(
                    states=lowerCAmelCase__ , actions=lowerCAmelCase__ , rewards=lowerCAmelCase__ , returns_to_go=lowerCAmelCase__ , timesteps=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )

            self.assertEqual(action_pred.shape , actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4))

            # Simulate one environment transition with a random next state.
            __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = ( # env.step(action)
                torch.randn(1 , 1 , config.state_dim).to(device=lowerCAmelCase__ , dtype=torch.floataa),
                1.0,
                False,
                {},
            )

            __SCREAMING_SNAKE_CASE = action_pred[0, -1]
            __SCREAMING_SNAKE_CASE = torch.cat([states, state] , dim=1)
            # Decrement the return-to-go by the received reward.
            __SCREAMING_SNAKE_CASE = returns_to_go[0, -1] - reward
            __SCREAMING_SNAKE_CASE = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1)] , dim=1)
            __SCREAMING_SNAKE_CASE = torch.cat(
                [timesteps, torch.ones((1, 1) , device=lowerCAmelCase__ , dtype=torch.long) * (step + 1)] , dim=1)
| 100
|
"""simple docstring"""
def lowercase ( __snake_case : int ):
    """Return the n-th Fibonacci number (0, 1, 1, 2, 3, ...).

    Fixes the original body, which referenced an undefined name ``n`` and
    called ``isinstance(x, x)``. Behavior quirks preserved: non-int input and
    ``n == 1`` both return 0 (the original special-cased them together even
    though F(1) is normally 1). Negative n returns 0.

    Uses pairwise iteration instead of materialising the whole sequence
    (O(1) extra space instead of O(n)).

    >>> lowercase(2)
    1
    >>> lowercase(10)
    55
    """
    n = __snake_case
    if n == 1 or not isinstance(n, int):
        return 0
    prev, cur = 0, 1
    for _ in range(max(n, 0)):
        prev, cur = cur, prev + cur
    return prev
def lowercase ( __snake_case : int ):
    """Return the index (starting from F(1)=1, F(2)=1) of the first Fibonacci
    number with at least ``__snake_case`` decimal digits (Project Euler 25).

    Fixes the original, which called an undefined helper ``fibonacci`` and
    recomputed the whole sequence from scratch on every iteration (O(n^2)
    additions); this version carries the last two terms along instead.

    >>> lowercase(3)
    12
    """
    digits = 0
    index = 2
    prev, cur = 1, 1  # F(1), F(2)
    while digits < __snake_case:
        index += 1
        prev, cur = cur, prev + cur  # cur is now F(index)
        digits = len(str(cur))
    return index
def lowercase ( __snake_case : int = 1_0_0_0 ):
    """Project Euler 25 entry point: index of the first Fibonacci number with
    ``__snake_case`` (default 1000) decimal digits.

    NOTE(review): ``fibonacci_digits_index`` is not defined in this file as
    written (the helpers above were renamed by a mangling pass), so this
    would raise NameError — confirm against the original source.
    """
    return fibonacci_digits_index(__snake_case )
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this file as written (the
    # entry point above is named ``lowercase``); this would raise NameError.
    print(solution(int(str(input()).strip())))
| 33
| 0
|
# Public names of the download subpackage.
# NOTE(review): this list was presumably ``__all__`` before identifier
# mangling; as written it is just an unused module-level constant.
lowercase__ :Optional[int] = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 101
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the MobileNetV2 model family.
# NOTE(review): this file looks machine-mangled — every assignment target was
# renamed to ``__A`` while the final _LazyModule call still references
# ``_import_structure`` (never defined here), so the module would raise
# NameError as written; confirm against the upstream transformers __init__.
__A : List[str] = {
    '''configuration_mobilenet_v2''': [
        '''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''MobileNetV2Config''',
        '''MobileNetV2OnnxConfig''',
    ],
}

# Vision-only symbols are registered only when the vision extras are present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Dict = ['''MobileNetV2FeatureExtractor''']
    __A : Optional[int] = ['''MobileNetV2ImageProcessor''']

# Torch-backed modeling symbols.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Optional[Any] = [
        '''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MobileNetV2ForImageClassification''',
        '''MobileNetV2ForSemanticSegmentation''',
        '''MobileNetV2Model''',
        '''MobileNetV2PreTrainedModel''',
        '''load_tf_weights_in_mobilenet_v2''',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime uses the lazy module.
    from .configuration_mobilenet_va import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetVaConfig,
        MobileNetVaOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
        from .image_processing_mobilenet_va import MobileNetVaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_va import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetVaForImageClassification,
            MobileNetVaForSemanticSegmentation,
            MobileNetVaModel,
            MobileNetVaPreTrainedModel,
            load_tf_weights_in_mobilenet_va,
        )

else:
    import sys

    __A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _UpperCAmelCase ( __snake_case ):
    """Seq2seq-specific training arguments layered on TrainingArguments
    (sortish sampling plus generation-time knobs).

    NOTE(review): identifiers look machine-mangled — every field is named
    ``lowerCamelCase__`` (each declaration rebinds the previous) and the
    defaults reference an undefined module-level ``__snake_case``; judging by
    the metadata these were sortish_sampler / predict_with_generate /
    generation_max_length / generation_num_beams / generation_config
    originally — confirm against the upstream file.
    """

    lowerCamelCase__ =field(default=__snake_case, metadata={'help': 'Whether to use SortishSampler or not.'} )
    lowerCamelCase__ =field(
        default=__snake_case, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    lowerCamelCase__ =field(
        default=__snake_case, metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        }, )
    lowerCamelCase__ =field(
        default=__snake_case, metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        }, )
    lowerCamelCase__ =field(
        default=__snake_case, metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        }, )

    def SCREAMING_SNAKE_CASE (self ):
        """Serialize to a plain dict, converting nested config objects.

        NOTE(review): the body reads undefined names ``d`` and ``a_`` — the
        original presumably iterated ``super().to_dict()`` and called
        ``v.to_dict()`` on GenerationConfig values; would raise NameError as
        written.
        """
        __snake_case : List[Any] = super().to_dict()
        for k, v in d.items():
            if isinstance(a_ , a_ ):
                __snake_case : Tuple = v.to_dict()
        return d
| 102
|
"""simple docstring"""
from __future__ import annotations
__A : List[Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__A : str = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase ( __snake_case : list[float] ):
    """Brute-force next-greater-element: for each position return the first
    later value strictly greater than it, or -1 when none exists. O(n^2).

    Fixes the original body, which called ``range()`` on the input list
    itself and read undefined names (``arr``, ``result``).
    """
    arr = __snake_case
    result: list[float] = []
    for i, current in enumerate(arr):
        nxt: float = -1
        for candidate in arr[i + 1 :]:
            if current < candidate:
                nxt = candidate
                break
        result.append(nxt)
    return result
def lowercase ( __snake_case : list[float] ):
    """Next-greater-element via enumerate + slice scan (still O(n^2) worst
    case, but avoids index arithmetic on the outer element).

    Fixes the original body, which sliced an undefined name ``arr`` and
    appended the entire input list to an undefined ``result``.
    """
    result: list[float] = []
    for i, outer in enumerate(__snake_case):
        nxt: float = -1
        for inner in __snake_case[i + 1 :]:
            if outer < inner:
                nxt = inner
                break
        result.append(nxt)
    return result
def lowercase ( __snake_case : list[float] ):
    """Next-greater-element with a monotonic stack — O(n) total.

    Scans right-to-left keeping a stack of still-viable candidates; values
    <= the current element can never be the answer for anything further
    left, so they are popped. Fixes the original body, which read undefined
    names (``arr_size``, ``stack``, ``result``) and never wrote the result
    slots.
    """
    arr_size = len(__snake_case)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        # Drop candidates that are not strictly greater than the current value.
        while stack and stack[-1] <= __snake_case[index]:
            stack.pop()
        if stack:
            result[index] = stack[-1]
        stack.append(__snake_case[index])
    return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    # NOTE(review): the names used below (``arr``, ``setup``,
    # ``next_greatest_element_slow`` / ``_fast`` / ``next_greatest_element``)
    # are not defined in this file as written — the module constants and
    # functions were renamed to ``__A`` / ``lowercase`` by a mangling pass —
    # so this demo/benchmark block would raise NameError if executed.
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    __A : int = (
        '''from __main__ import arr, next_greatest_element_slow, '''
        '''next_greatest_element_fast, next_greatest_element'''
    )
    print(
        '''next_greatest_element_slow():''',
        timeit('''next_greatest_element_slow(arr)''', setup=setup),
    )
    print(
        '''next_greatest_element_fast():''',
        timeit('''next_greatest_element_fast(arr)''', setup=setup),
    )
    print(
        ''' next_greatest_element():''',
        timeit('''next_greatest_element(arr)''', setup=setup),
    )
| 33
| 0
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Any = logging.get_logger(__name__)
A__ : int = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __snake_case ( UpperCamelCase_ ):
    """Configuration for the OWL-ViT text encoder.

    Restores real parameter/attribute names: the original declared every
    ``__init__`` parameter as ``A_`` (duplicate argument names — a
    SyntaxError) and rebound throwaway locals instead of setting the
    attributes that the rest of the library reads.
    """

    model_type = '''owlvit_text_model'''

    def __init__(
        self,
        vocab_size=4_9_4_0_8,
        hidden_size=5_1_2,
        intermediate_size=2_0_4_8,
        num_hidden_layers=1_2,
        num_attention_heads=8,
        max_position_embeddings=1_6,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=4_9_4_0_6,
        eos_token_id=4_9_4_0_7,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def UpperCAmelCase__ (cls, pretrained_model_name_or_path, **kwargs):
        """Load a config dict from a checkpoint; unwrap the ``text_config``
        sub-dict when pointed at a composite OwlViT config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''') == "owlvit":
            config_dict = config_dict['''text_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class __snake_case ( UpperCamelCase_ ):
    """Configuration for the OWL-ViT vision encoder.

    Restores real parameter/attribute names: the original declared every
    ``__init__`` parameter as ``A_`` (duplicate argument names — a
    SyntaxError) and never set the instance attributes.
    """

    model_type = '''owlvit_vision_model'''

    def __init__(
        self,
        hidden_size=7_6_8,
        intermediate_size=3_0_7_2,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        num_channels=3,
        image_size=7_6_8,
        patch_size=3_2,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def UpperCAmelCase__ (cls, pretrained_model_name_or_path, **kwargs):
        """Load a config dict from a checkpoint; unwrap the ``vision_config``
        sub-dict when pointed at a composite OwlViT config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''') == "owlvit":
            config_dict = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class __snake_case ( UpperCamelCase_ ):
    """Composite OWL-ViT configuration holding a text and a vision config.

    Restores real parameter/attribute names: the original declared its
    ``__init__`` / classmethod parameters as duplicate ``A_`` names (a
    SyntaxError) and rebound throwaway locals instead of setting attributes.
    NOTE(review): ``OwlViTTextConfig`` / ``OwlViTVisionConfig`` are referenced
    exactly as in the original body but are not defined under those names in
    this (mangled) file — confirm against upstream.
    """

    model_type = '''owlvit'''
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=5_1_2,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''')

        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''')

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def UpperCAmelCase__ (cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate from a checkpoint's config dict, warning on a
        model_type mismatch."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def UpperCAmelCase__ (cls, text_config, vision_config, **kwargs):
        """Build a composite config from separate text and vision configs."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def UpperCAmelCase__ (self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class __snake_case ( UpperCamelCase_ ):
    """ONNX export configuration for OWL-ViT.

    Restores the standard OnnxConfig member names: the original defined
    every property/method as ``UpperCAmelCase__`` so each definition
    shadowed the previous one and only the last survived.
    """

    @property
    def inputs(self):
        # Dynamic axes for the text + vision inputs.
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
            ])

    @property
    def outputs(self):
        # Dynamic axes for the model outputs.
        return OrderedDict(
            [
                ('''logits_per_image''', {0: '''batch'''}),
                ('''logits_per_text''', {0: '''batch'''}),
                ('''text_embeds''', {0: '''batch'''}),
                ('''image_embeds''', {0: '''batch'''}),
            ])

    @property
    def atol_for_validation(self):
        # Numerical tolerance when validating the exported model.
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ):
        """Create dummy text + image inputs via the processor's tokenizer and
        image processor, merged into a single feed dict."""
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self):
        return 1_4
| 103
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the NLLB tokenizers.
# NOTE(review): this file looks machine-mangled — every assignment target was
# renamed to ``__A`` while the final _LazyModule call still references
# ``_import_structure`` (never defined here); would raise NameError as
# written — confirm against the upstream transformers __init__.
__A : Union[str, Any] = {}

# Slow (sentencepiece) tokenizer.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Dict = ['''NllbTokenizer''']

# Fast (tokenizers) tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Dict = ['''NllbTokenizerFast''']

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime uses the lazy module.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    __A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
'''simple docstring'''
class lowercase_ :
    """A knapsack menu item: a name, a value (benefit) and a weight (cost).

    Restores the intended implementation: the original declared three
    ``__init__`` parameters all named ``lowercase__`` (duplicate argument
    names — a SyntaxError), never stored the attributes that ``__repr__``
    reads, and named all four accessors identically so only the last one
    survived. The accessor names match the calls made by the greedy helper
    below (``get_weight`` / ``get_value``).
    """

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def get_value_weight(self):
        """Value per unit weight (efficiency ratio)."""
        return self.value / self.weight
def _A ( name , value , weight ):
    """Build a list of menu items from parallel name/value/weight lists.

    The original declared all three parameters as ``A__`` (duplicate
    argument names — a SyntaxError) and constructed an undefined ``Things``
    class; the parameters are restored positionally and the item class in
    this file (``lowercase_``) is used. Iteration length follows the weight
    list, matching the original's ``range(len(<last parameter>))``.
    """
    return [lowercase_(name[i], value[i], weight[i]) for i in range(len(weight))]
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = sorted(A__ , key=A__ , reverse=A__ )
__lowercase = []
__lowercase , __lowercase = 0.0, 0.0
for i in range(len(A__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def _A ( ):
    """No-op placeholder; the original body contained only a docstring."""


if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 104
|
"""simple docstring"""
def lowercase ( __snake_case : int ):
    """Return the sum of the proper divisors of a positive integer
    (divisors excluding the number itself; 6 -> 1+2+3 = 6).

    Raises ValueError for non-int or non-positive input. Fixes the original
    body, which read an undefined name ``input_num`` and called
    ``isinstance(x, x)``; also replaces the O(n) scan up to n/2 with
    divisor-pair enumeration up to sqrt(n).

    >>> lowercase(6)
    6
    >>> lowercase(12)
    16
    """
    from math import isqrt

    input_num = __snake_case
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''')
    if input_num <= 0:
        raise ValueError('''Input must be positive''')
    total = 0
    for small in range(1, isqrt(input_num) + 1):
        if input_num % small == 0:
            large = input_num // small
            # Count each divisor of the pair once (avoid double-counting sqrt).
            total += small + (large if large != small else 0)
    # Drop n itself so only proper divisors remain.
    return total - input_num
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 33
| 0
|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->Optional[Any]:
    """Build a MaskFormerConfig (Swin-tiny backbone) for model name
    ``_lowercase``, choosing num_labels and the hub label-map file from the
    name.

    Fixes the original body, which passed the model name to
    ``hf_hub_download`` in place of both the repo id and the filename, keyed
    the label map with ``int(model_name)``, and dropped every num_labels /
    filename assignment into throwaway locals.
    """
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in _lowercase:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in _lowercase:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in _lowercase:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in _lowercase:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in _lowercase:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in _lowercase:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    # NOTE(review): upstream also assigns config.id2label / config.label2id
    # from this mapping; added here so the download is not discarded —
    # confirm against the upstream conversion script.
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] ) ->List[str]:
'''simple docstring'''
a : Union[str, Any] = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : Dict , _lowercase : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
a : str = dct.pop(_lowercase )
a : Dict = val
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
a : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
a : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
a : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
a : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
a : Optional[int] = in_proj_weight[:dim, :]
a : Dict = in_proj_bias[: dim]
a : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
a : str = in_proj_bias[
dim : dim * 2
]
a : Union[str, Any] = in_proj_weight[
-dim :, :
]
a : Dict = in_proj_bias[-dim :]
# fmt: on
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : Optional[Any] ) ->Any:
    """Split the DETR-style decoder's fused self-/cross-attention ``in_proj`` tensors into q/k/v.

    NOTE(review): machine-mangled — both parameters share the name ``_lowercase``
    (a SyntaxError; presumably ``state_dict`` and ``config``) and the
    destination ``state_dict`` keys of the ``a = ...`` assignments were lost.
    Restore from the upstream MaskFormer conversion script; code below is left
    byte-identical.
    """
    a : Optional[int] = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        a : int = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
        a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        a : List[str] = in_proj_weight[: hidden_size, :]
        a : Optional[int] = in_proj_bias[:config.hidden_size]
        a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
        a : int = in_proj_bias[hidden_size : hidden_size * 2]
        a : List[Any] = in_proj_weight[-hidden_size :, :]
        a : Optional[int] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        a : Optional[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
        a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        a : str = in_proj_weight[: hidden_size, :]
        a : List[Any] = in_proj_bias[:config.hidden_size]
        a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
        a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
        a : Union[str, Any] = in_proj_weight[-hidden_size :, :]
        a : str = in_proj_bias[-hidden_size :]
    # fmt: on
def _SCREAMING_SNAKE_CASE():
    """Download the standard COCO "two cats" validation image used to sanity-check converted models.

    Returns a ``PIL.Image.Image`` (the previous ``torch.Tensor`` return
    annotation was wrong — ``Image.open`` yields a PIL image).

    NOTE(review): the original body referenced the undefined name ``_lowercase``
    for both the URL and the ``stream`` flag; restored to the local URL and
    ``stream=True`` so PIL can read straight from the open HTTP response.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : str , _lowercase : str , _lowercase : bool = False ) ->List[Any]:
    """Convert an original MaskFormer pickle checkpoint into a 🤗 MaskFormerForInstanceSegmentation
    checkpoint, verify the logits on a test image, and optionally save and push the result.

    NOTE(review): machine-mangled — all four parameters share the name
    ``_lowercase`` (a SyntaxError; presumably ``model_name``,
    ``checkpoint_path``, ``pytorch_dump_folder_path``, ``push_to_hub``), and
    locals were collapsed to ``a`` while later lines still read the intended
    names (``config``, ``data``, ``state_dict``, ``rename_keys``, ``model``,
    ``missing_keys``/``unexpected_keys``, ``image_processor``, ``inputs``,
    ``outputs`` ...).  Code below is left byte-identical.
    """
    a : List[Any] = get_maskformer_config(_lowercase )
    # load original state_dict
    with open(_lowercase , "rb" ) as f:
        a : Union[str, Any] = pickle.load(_lowercase )
    a : Tuple = data["model"]
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    a : str = create_rename_keys(_lowercase )
    for src, dest in rename_keys:
        rename_key(_lowercase , _lowercase , _lowercase )
    read_in_swin_q_k_v(_lowercase , config.backbone_config )
    read_in_decoder_q_k_v(_lowercase , _lowercase )
    # update to torch tensors
    for key, value in state_dict.items():
        a : Dict = torch.from_numpy(_lowercase )
    # load 🤗 model
    a : Optional[int] = MaskFormerForInstanceSegmentation(_lowercase )
    model.eval()
    for name, param in model.named_parameters():
        print(_lowercase , param.shape )
    # NOTE(review): the tuple-unpack targets (missing_keys, unexpected_keys)
    # were both collapsed to `a`; the asserts below read the lost names.
    a, a : Union[str, Any] = model.load_state_dict(_lowercase , strict=_lowercase )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(_lowercase ) == 0, F"""Unexpected keys: {unexpected_keys}"""
    # verify results
    a : Dict = prepare_img()
    # ignore_index differs per dataset family (Vistas / Cityscapes / default).
    if "vistas" in model_name:
        a : Union[str, Any] = 65
    elif "cityscapes" in model_name:
        a : List[Any] = 6_5535
    else:
        a : Dict = 255
    a : Optional[int] = True if "ade" in model_name else False
    a : List[Any] = MaskFormerImageProcessor(ignore_index=_lowercase , reduce_labels=_lowercase )
    a : List[str] = image_processor(_lowercase , return_tensors="pt" )
    a : Union[str, Any] = model(**_lowercase )
    print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
    # Reference logits are only known for the tiny ADE checkpoint.
    if model_name == "maskformer-swin-tiny-ade":
        a : int = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowercase , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(_lowercase ).mkdir(exist_ok=_lowercase )
        model.save_pretrained(_lowercase )
        image_processor.save_pretrained(_lowercase )
    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(F"""nielsr/{model_name}""" )
        image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    # CLI driver for the MaskFormer conversion above.
    # Fixes: `parser` and `args` were bound to the mangled name `a`, so every
    # subsequent line raised NameError; the first --model_name help text was a
    # one-element tuple, which breaks argparse's help rendering (help must be a str).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    # NOTE(review): the conversion function above survives only under the
    # mangled name `_SCREAMING_SNAKE_CASE`; this call is kept as written.
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 105
|
"""simple docstring"""
def lowercase(__snake_case):
    """Count inversions in the sequence by brute force in O(n^2).

    An inversion is a pair of indices ``(i, j)`` with ``i < j`` and
    ``arr[i] > arr[j]``.  Returns 0 for empty and single-element input.

    NOTE(review): the original annotated the parameter as ``Optional[int]``,
    which is wrong (the argument is a sequence) and also a definition-time
    NameError since ``typing`` is never imported in this module; the bogus
    annotation is dropped.
    """
    values = __snake_case
    n = len(values)
    count = 0
    for left in range(n - 1):
        for right in range(left + 1, n):
            if values[left] > values[right]:
                count += 1
    return count
def lowercase(__snake_case):
    """Count inversions with merge sort in O(n log n).

    Returns ``(sorted_list, inversion_count)`` for the input sequence.

    NOTE(review): the original recursed through ``count_inversions_recursive``
    and called ``_count_cross_inversions`` — names that do not exist in this
    (mangled) module, so it raised NameError.  The recursion and the counting
    merge are made self-contained via private nested helpers.
    """

    def _merge_count(p, q):
        # Merge two sorted lists, counting cross pairs (x in p, y in q) with x > y.
        merged = []
        i = j = cross = 0
        while i < len(p) and j < len(q):
            if p[i] > q[j]:
                # p is sorted, so p[i:] are all greater than q[j].
                cross += len(p) - i
                merged.append(q[j])
                j += 1
            else:
                merged.append(p[i])
                i += 1
        merged.extend(p[i:])
        merged.extend(q[j:])
        return merged, cross

    def _sort_count(arr):
        if len(arr) <= 1:
            return list(arr), 0
        mid = len(arr) // 2
        left, left_inv = _sort_count(arr[:mid])
        right, right_inv = _sort_count(arr[mid:])
        merged, cross_inv = _merge_count(left, right)
        return merged, left_inv + right_inv + cross_inv

    return _sort_count(__snake_case)
def lowercase(p, q):
    """Merge two sorted lists ``p`` and ``q`` and count cross inversions.

    A cross inversion is a pair ``(x, y)`` with ``x`` taken from ``p``, ``y``
    from ``q`` and ``x > y``.  Returns ``(merged_sorted_list, count)``.

    NOTE(review): the original declared both parameters as ``__snake_case``
    (a SyntaxError) while the body read ``p``, ``q``, ``j`` and
    ``num_inversion``, none of which were initialised; parameter and local
    names restored from the body.
    """
    r = []
    i = j = 0
    num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    # Exactly one of the two tails is non-empty at this point.
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def lowercase ( ):
    """Smoke-test the inversion counters on a known array, a sorted array and an empty list.

    NOTE(review): machine-mangled — the body reads ``count_inversions_bf``,
    ``count_inversions_recursive``, ``num_inversions_bf``,
    ``num_inversions_recursive`` and ``arr_a``, none of which exist under those
    names here (all functions were renamed to ``lowercase`` and all locals to
    ``lowercase_``), so this raises NameError as written.  Code left
    byte-identical.
    """
    lowercase_ : Union[str, Any] = [1_0, 2, 1, 5, 5, 2, 1_1]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    lowercase_ : int = count_inversions_bf(__snake_case )
    lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''' , __snake_case )
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    lowercase_ : Dict = count_inversions_bf(__snake_case )
    lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , __snake_case )
    # an empty list should also have zero inversions
    lowercase_ : List[Any] = []
    lowercase_ : Any = count_inversions_bf(__snake_case )
    lowercase_ , lowercase_ : List[str] = count_inversions_recursive(__snake_case )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , __snake_case )
if __name__ == "__main__":
    # NOTE(review): `main` is undefined — the entry point above was renamed to `lowercase`.
    main()
| 33
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    """Placeholder object that raises a helpful error when the ``speech`` backend is unavailable.

    Fixes applied: the metaclass referenced the undefined name ``a_`` —
    ``DummyObject`` is what this module imports above — and ``*lowercase_`` /
    ``**lowercase_`` shared one name, which is a SyntaxError.
    """

    # Backends guarded by this dummy; consumed by requires_backends/DummyObject.
    lowercase__ = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
class SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    """Placeholder object that raises a helpful error when the ``speech`` backend is unavailable.

    Fixes applied: the metaclass referenced the undefined name ``a_``
    (``DummyObject`` is imported above), and ``*lowercase_`` / ``**lowercase_``
    shared one name, which is a SyntaxError.  NOTE(review): this class reuses
    the name of the previous dummy and shadows it, mirroring the original file.
    """

    # Backends guarded by this dummy; consumed by requires_backends/DummyObject.
    lowercase__ = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 106
|
"""simple docstring"""
# Dependency pin table: maps a bare package name to the full pip requirement
# specifier used when assembling this library's install/extras requirements.
__A : Any = {
    '''Pillow''': '''Pillow''',
    '''accelerate''': '''accelerate>=0.11.0''',
    '''compel''': '''compel==0.1.8''',
    '''black''': '''black~=23.1''',
    '''datasets''': '''datasets''',
    '''filelock''': '''filelock''',
    '''flax''': '''flax>=0.4.1''',
    '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
    '''huggingface-hub''': '''huggingface-hub>=0.13.2''',
    '''requests-mock''': '''requests-mock==1.10.0''',
    '''importlib_metadata''': '''importlib_metadata''',
    '''invisible-watermark''': '''invisible-watermark''',
    '''isort''': '''isort>=5.5.4''',
    '''jax''': '''jax>=0.2.8,!=0.3.2''',
    '''jaxlib''': '''jaxlib>=0.1.65''',
    '''Jinja2''': '''Jinja2''',
    '''k-diffusion''': '''k-diffusion>=0.0.12''',
    '''torchsde''': '''torchsde''',
    '''note_seq''': '''note_seq''',
    '''librosa''': '''librosa''',
    '''numpy''': '''numpy''',
    '''omegaconf''': '''omegaconf''',
    '''parameterized''': '''parameterized''',
    '''protobuf''': '''protobuf>=3.20.3,<4''',
    '''pytest''': '''pytest''',
    '''pytest-timeout''': '''pytest-timeout''',
    '''pytest-xdist''': '''pytest-xdist''',
    '''ruff''': '''ruff>=0.0.241''',
    '''safetensors''': '''safetensors''',
    '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
    '''scipy''': '''scipy''',
    '''onnx''': '''onnx''',
    '''regex''': '''regex!=2019.12.17''',
    '''requests''': '''requests''',
    '''tensorboard''': '''tensorboard''',
    '''torch''': '''torch>=1.4''',
    '''torchvision''': '''torchvision''',
    '''transformers''': '''transformers>=4.25.1''',
    '''urllib3''': '''urllib3<=2.0.0''',
}
| 33
| 0
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__(CLIPPreTrainedModel):
    """Image encoder for Paint-by-Example: a CLIP vision backbone followed by a small
    transformer mapper, a layer norm and a linear projection to ``proj_size``.

    Fixes applied: the base class referenced the undefined name
    ``_UpperCamelCase`` (``CLIPPreTrainedModel`` is what this module imports);
    ``__init__``'s two parameters shared the name ``__lowerCamelCase`` (a
    SyntaxError); and the ``a = ...`` assignments dropped the ``self.``
    attribute targets (``model``, ``mapper``, ``final_layer_norm``,
    ``proj_out``, ``uncond_vector``) that the second method reads.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # Learned embedding used as the unconditional input (classifier-free guidance scaling).
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def __UpperCAmelCase(self, pixel_values, return_uncond_vector=False):
        """Encode ``pixel_values`` into conditioning latents; optionally also return the uncond vector."""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    """Small stack of transformer blocks that maps pooled CLIP embeddings into the
    conditioning space used by the Paint-by-Example pipeline.

    Fixes applied: the class was mangled to ``snake_case__`` (colliding with the
    encoder class above, which instantiates ``PaintByExampleMapper`` by name),
    and the ``BasicTransformerBlock`` constructor arguments were collapsed to a
    single repeated name; restored to hidden size / head count / head dim.
    """

    def __init__(self, config):
        super().__init__()
        # One block per five encoder layers (rounded up), as in the upstream model.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True
                )
                for _ in range(num_layers)
            ]
        )

    def __UpperCAmelCase(self, hidden_states):
        """Run the hidden states through each transformer block in sequence."""
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states


# Backward-compatible alias: the module previously exposed this class under the
# mangled name `snake_case__`.
snake_case__ = PaintByExampleMapper
| 107
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Public import structure consumed by _LazyModule below.  Fixes applied: the
# dict and the modeling list were each bound to the mangled name `__A`, so
# `_import_structure` (read at the bottom) was undefined and the modeling
# symbols were never attached; the _LazyModule instance was also discarded
# instead of being installed in sys.modules.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling objects as well.
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# Guard the fairseq version range this conversion script was written against.
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
    raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
    raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
# NOTE(review): the next three bindings all reuse a single mangled name, so the
# logger and the sample text are immediately shadowed by "en_XX"; the
# conversion function below also reads `SAMPLE_LANGUAGE`, which no longer
# exists under that name.
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''Hello, World!'''
lowerCAmelCase__ = '''en_XX'''
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ):
    """Convert a fairseq X-MOD checkpoint into a 🤗 Transformers checkpoint, copy all
    weights layer by layer, and verify both models produce (near-)identical outputs.

    NOTE(review): machine-mangled — the three parameters share the name
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError; presumably
    ``xmod_checkpoint_path``, ``pytorch_dump_folder_path`` and
    ``classification_head``), and locals were collapsed to ``lowerCAmelCase``
    while later lines still read the intended names (``xmod``, ``config``,
    ``model``, ``layer``, ``xmod_layer``, ``bert_output`` ...).
    ``SAMPLE_LANGUAGE`` is also undefined at module level.  Code below is left
    byte-identical.
    """
    lowerCAmelCase : Union[str, Any] = Path("data_bin" )
    lowerCAmelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe="sentencepiece" , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
    xmod.eval() # disable dropout
    print(SCREAMING_SNAKE_CASE )
    lowerCAmelCase : List[str] = xmod.model.encoder.sentence_encoder
    lowerCAmelCase : List[Any] = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        lowerCAmelCase : List[str] = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:" , SCREAMING_SNAKE_CASE )
    lowerCAmelCase : List[str] = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    lowerCAmelCase : Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
    lowerCAmelCase : Optional[int] = xmod_sent_encoder.embed_positions.weight
    lowerCAmelCase : List[Any] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
    lowerCAmelCase : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
    lowerCAmelCase : List[str] = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        lowerCAmelCase : str = model.roberta.encoder.layer[i]
        lowerCAmelCase : Dict = xmod_sent_encoder.layers[i]
        # self attention
        lowerCAmelCase : List[str] = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError("Dimensions of self-attention weights do not match." )
        lowerCAmelCase : Optional[Any] = xmod_layer.self_attn.q_proj.weight
        lowerCAmelCase : Dict = xmod_layer.self_attn.q_proj.bias
        lowerCAmelCase : str = xmod_layer.self_attn.k_proj.weight
        lowerCAmelCase : List[Any] = xmod_layer.self_attn.k_proj.bias
        lowerCAmelCase : List[str] = xmod_layer.self_attn.v_proj.weight
        lowerCAmelCase : int = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        lowerCAmelCase : Optional[int] = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match." )
        lowerCAmelCase : Optional[int] = xmod_layer.self_attn.out_proj.weight
        lowerCAmelCase : Dict = xmod_layer.self_attn.out_proj.bias
        lowerCAmelCase : List[Any] = xmod_layer.self_attn_layer_norm.weight
        lowerCAmelCase : Optional[Any] = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        lowerCAmelCase : Dict = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match." )
        lowerCAmelCase : int = xmod_layer.fca.weight
        lowerCAmelCase : Union[str, Any] = xmod_layer.fca.bias
        # output
        lowerCAmelCase : List[Any] = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match." )
        lowerCAmelCase : Optional[int] = xmod_layer.fca.weight
        lowerCAmelCase : str = xmod_layer.fca.bias
        lowerCAmelCase : List[Any] = xmod_layer.final_layer_norm.weight
        lowerCAmelCase : int = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            lowerCAmelCase : Union[str, Any] = xmod_layer.adapter_layer_norm.weight
            lowerCAmelCase : Any = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError("Lists of language adapters do not match." )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            lowerCAmelCase : Any = bert_output.adapter_modules[lang_code]
            lowerCAmelCase : Optional[int] = xmod_layer.adapter_modules[lang_code]
            lowerCAmelCase : int = from_adapter.fca.weight
            lowerCAmelCase : str = from_adapter.fca.bias
            lowerCAmelCase : List[Any] = from_adapter.fca.weight
            lowerCAmelCase : Union[str, Any] = from_adapter.fca.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        lowerCAmelCase : Any = xmod_sent_encoder.layer_norm.weight
        lowerCAmelCase : Dict = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        lowerCAmelCase : Any = xmod.model.classification_heads["mnli"].dense.weight
        lowerCAmelCase : Dict = xmod.model.classification_heads["mnli"].dense.bias
        lowerCAmelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.weight
        lowerCAmelCase : int = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        lowerCAmelCase : List[str] = xmod.model.encoder.lm_head.dense.weight
        lowerCAmelCase : Optional[int] = xmod.model.encoder.lm_head.dense.bias
        lowerCAmelCase : List[str] = xmod.model.encoder.lm_head.layer_norm.weight
        lowerCAmelCase : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.bias
        lowerCAmelCase : List[Any] = xmod.model.encoder.lm_head.weight
        lowerCAmelCase : Any = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    lowerCAmelCase : List[str] = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
    lowerCAmelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0]
    if classification_head:
        lowerCAmelCase : Optional[Any] = xmod.model.classification_heads["mnli"](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
    else:
        lowerCAmelCase : Optional[int] = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    lowerCAmelCase : Union[str, Any] = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    lowerCAmelCase : Union[str, Any] = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # CLI driver for the X-MOD conversion above.
    # Fixes: `parser` and `args` were bound to the mangled name `lowerCAmelCase__`,
    # so every subsequent line raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    # NOTE(review): the conversion function above survives only under the
    # mangled name `a__`; this call is kept as originally written.
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 108
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__A : List[str] = '''examples/'''
__A : int = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__A : Dict = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__A : Optional[int] = '''README.md'''
def lowercase(fname: str, version: str, pattern: str):
    """Rewrite the version string in ``fname``.

    ``pattern`` selects a ``(compiled_regex, replacement_template)`` pair from
    ``REPLACE_PATTERNS``; the literal ``"VERSION"`` in the template is replaced
    with ``version`` before substituting.

    NOTE(review): the original declared all three parameters as ``__snake_case``
    (a SyntaxError) and collapsed the regex/template unpack targets into one
    name; names restored from how the body uses them.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def lowercase ( __snake_case : int ):
    """Bump the pinned minimum version in every ``.py`` example script.

    NOTE(review): machine-mangled — the walk target and every argument of the
    inner call were collapsed to the single parameter ``__snake_case``; the
    walk presumably targeted the examples directory while the parameter was
    the version string, and ``update_version_in_file`` survives only under the
    mangled name ``lowercase``.  Code left byte-identical.
    """
    for folder, directories, fnames in os.walk(__snake_case ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(__snake_case , __snake_case ) , __snake_case , pattern='''examples''' )
def lowercase ( __snake_case : Optional[Any] , __snake_case : Optional[Any]=False ):
    """Update the version in every registered file, and (unless a patch release) in the examples.

    NOTE(review): machine-mangled — the two parameters share the name
    ``__snake_case`` (a SyntaxError; presumably ``version`` and ``patch``),
    the inner call's arguments were collapsed, and ``patch`` /
    ``update_version_in_file`` / ``update_version_in_examples`` are undefined
    under those names here.  Code left byte-identical.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(__snake_case , __snake_case , __snake_case )
    if not patch:
        update_version_in_examples(__snake_case )
def lowercase ( ):
    """Rewrite the model list in the README so links point at the stable docs instead of `main`.

    NOTE(review): machine-mangled — the two prompt strings were bound to
    ``lowercase_`` but the loops read ``_start_prompt`` / ``_end_prompt``, and
    the README path was collapsed to the undefined name ``__snake_case``
    (presumably ``README_FILE``), so this raises NameError as written.  Code
    left byte-identical.
    """
    lowercase_ : Union[str, Any] = '''🤗 Transformers currently provides the following architectures'''
    lowercase_ : Union[str, Any] = '''1. Want to contribute a new model?'''
    with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lowercase_ : List[str] = f.readlines()
    # Find the start of the list.
    lowercase_ : Optional[Any] = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    lowercase_ : str = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lowercase_ : str = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(__snake_case )
def lowercase():
    """Return the currently declared package version as a ``packaging`` Version object.

    Reads the ``__init__`` file registered in ``REPLACE_FILES`` and extracts
    the version string with the ``init`` regex from ``REPLACE_PATTERNS``.
    """
    with open(REPLACE_FILES["init"], "r") as init_file:
        contents = init_file.read()
    version_match = REPLACE_PATTERNS["init"][0].search(contents)
    return packaging.version.parse(version_match.groups()[0])
def lowercase ( __snake_case : Optional[Any]=False ):
    """Interactive pre-release step: derive the next version, confirm it with the user,
    and apply it everywhere (plus README cleanup for non-patch releases).

    NOTE(review): machine-mangled — ``patch`` is read but the parameter is
    named ``__snake_case``, ``default_version`` is read but assigned to
    ``lowercase_``, and ``get_version`` / ``global_version_update`` /
    ``clean_main_ref_in_model_list`` all survive only under the mangled name
    ``lowercase``, so this raises NameError as written.  Code left
    byte-identical.
    """
    lowercase_ : str = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        lowercase_ : Optional[Any] = default_version.base_version
    elif patch:
        lowercase_ : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        lowercase_ : Optional[int] = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    lowercase_ : int = input(F'''Which version are you releasing? [{default_version}]''' )
    if len(__snake_case ) == 0:
        lowercase_ : Dict = default_version
    print(F'''Updating version to {version}.''' )
    global_version_update(__snake_case , patch=__snake_case )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()
def lowercase ( ):
    """Interactive post-release step: bump to the next ``.dev0`` version and clean the README.

    NOTE(review): machine-mangled — ``current_version`` / ``dev_version`` /
    ``version`` are read but every local was assigned to ``lowercase_``, and
    ``get_version`` / ``global_version_update`` / ``clean_main_ref_in_model_list``
    survive only under the mangled name ``lowercase``.  Code left
    byte-identical.
    """
    lowercase_ : List[Any] = get_version()
    lowercase_ : List[str] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    lowercase_ : Any = current_version.base_version
    # Check with the user we got that right.
    lowercase_ : Tuple = input(F'''Which version are we developing now? [{dev_version}]''' )
    if len(__snake_case ) == 0:
        lowercase_ : str = dev_version
    print(F'''Updating version to {version}.''' )
    global_version_update(__snake_case )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI driver for the release/version-bump helpers above.
    # Fixes: `parser` and `args` were bound to the mangled name `__A`, so the
    # `add_argument` and `args.*` lines raised NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        # NOTE(review): pre_release_work / post_release_work survive only under
        # the mangled name `lowercase`; the calls are kept as originally written.
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 33
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for ``DisjunctiveConstraint`` (input validation and stepwise updates).

    NOTE(review): machine-mangled — all four test methods share one name (so
    only the last definition survives on the class), none start with ``test_``
    (so unittest discovers nothing), and ``_SCREAMING_SNAKE_CASE`` is an
    undefined stand-in for several different originals (the nested token list
    passed to the constructor, ``list`` in the isinstance check, the exception
    type in ``assertRaises``, and the per-step boolean results).  Code left
    byte-identical.
    """
    def SCREAMING_SNAKE_CASE ( self ) -> str:
        """Constraint stores its token ids; tensor (non-list) input must be rejected."""
        UpperCAmelCase : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]]
        UpperCAmelCase : Tuple = DisjunctiveConstraint(_SCREAMING_SNAKE_CASE )
        self.assertTrue(isinstance(dc.token_ids , _SCREAMING_SNAKE_CASE ) )
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        """One alternative being a strict prefix of another must be rejected."""
        UpperCAmelCase : Tuple = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            DisjunctiveConstraint(_SCREAMING_SNAKE_CASE ) # fails here
    def SCREAMING_SNAKE_CASE ( self ) -> str:
        """Stepping 1, 2, 3 should progress and complete the [1, 2, 3] branch."""
        UpperCAmelCase : Tuple = [[1, 2, 3], [1, 2, 4]]
        UpperCAmelCase : Tuple = DisjunctiveConstraint(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = dc.update(1 )
        UpperCAmelCase : Tuple = stepped is True and completed is False and reset is False
        self.assertTrue(_SCREAMING_SNAKE_CASE )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = dc.update(2 )
        UpperCAmelCase : List[str] = stepped is True and completed is False and reset is False
        self.assertTrue(_SCREAMING_SNAKE_CASE )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = dc.update(3 )
        UpperCAmelCase : int = stepped is True and completed is True and reset is False
        self.assertTrue(_SCREAMING_SNAKE_CASE )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """Completion, reset and remaining-count behaviour across two walks of the trie."""
        UpperCAmelCase : Tuple = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        UpperCAmelCase : Tuple = DisjunctiveConstraint(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 109
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowercase(repo_id: str, path: str, revision: Optional[str] = None):
    """Build the URL of ``path`` inside a *dataset* repository on the Hugging Face Hub.

    Older ``huggingface_hub`` versions (< 0.11.0) did not url-encode the file
    path themselves, so it is quoted here on their behalf.

    NOTE(review): the original declared all three parameters as
    ``__snake_case`` (a SyntaxError) and passed that single name everywhere;
    parameter names restored from the ``hfh.hf_hub_url`` signature.
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
| 33
| 0
|
from math import factorial
lowerCAmelCase = {str(d): factorial(d) for d in range(10)}
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return sum(DIGIT_FACTORIAL[d] for d in str(SCREAMING_SNAKE_CASE ) )
def _a ( ):
"""simple docstring"""
lowercase__ = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , SCREAMING_SNAKE_CASE ) if sum_of_digit_factorial(SCREAMING_SNAKE_CASE ) == i )
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined — the function above survives only
    # under the mangled name `_a`, so this guard raises NameError as written.
    print(f"""{solution() = }""")
| 110
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
    """Helper that builds a tiny ResNet config and dummy inputs for the TF
    ResNet model tests.

    NOTE(review): identifiers in this file were machine-obfuscated — every
    parameter is named ``A`` (duplicate argument names are a SyntaxError),
    every attribute assignment collapsed into a rebinding of ``lowercase_``,
    and all methods are named ``A`` so later defs shadow earlier ones.
    Restore the real names before this can run.
    """
    def __init__( self : int , A : Tuple , A : int=3 , A : List[str]=32 , A : Dict=3 , A : Any=10 , A : Dict=[10, 20, 30, 40] , A : Optional[Any]=[1, 1, 2, 1] , A : Union[str, Any]=True , A : Optional[Any]=True , A : Any="relu" , A : Optional[Any]=3 , A : Tuple=None , ) -> Dict:
        # Store the test hyper-parameters (batch size, image size, depths, ...)
        # — presumably ``self.<attr> = <param>`` before obfuscation.
        lowercase_ : str = parent
        lowercase_ : List[Any] = batch_size
        lowercase_ : Optional[int] = image_size
        lowercase_ : int = num_channels
        lowercase_ : int = embeddings_size
        lowercase_ : str = hidden_sizes
        lowercase_ : List[str] = depths
        lowercase_ : Dict = is_training
        lowercase_ : int = use_labels
        lowercase_ : Any = hidden_act
        lowercase_ : List[Any] = num_labels
        lowercase_ : Tuple = scope
        lowercase_ : Optional[Any] = len(A )
    def A ( self : str ) -> Tuple:
        # prepare_config_and_inputs: random pixel_values (+ labels when used).
        lowercase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase_ : Union[str, Any] = None
        if self.use_labels:
            lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
        lowercase_ : Optional[int] = self.get_config()
        return config, pixel_values, labels
    def A ( self : Dict ) -> int:
        # get_config: a small ResNetConfig mirroring the stored settings.
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def A ( self : str , A : Tuple , A : str , A : str ) -> str:
        # create_and_check_model: forward pass + output-shape assertion.
        lowercase_ : str = TFResNetModel(config=A )
        lowercase_ : Union[str, Any] = model(A )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def A ( self : Any , A : int , A : List[Any] , A : Optional[Any] ) -> Optional[Any]:
        # create_and_check_for_image_classification: logits-shape assertion.
        lowercase_ : Tuple = self.num_labels
        lowercase_ : Union[str, Any] = TFResNetForImageClassification(A )
        lowercase_ : Tuple = model(A , labels=A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A ( self : Union[str, Any] ) -> Tuple:
        # prepare_config_and_inputs_for_common: (config, {"pixel_values": ...}).
        lowercase_ : Tuple = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ : Dict = config_and_inputs
        lowercase_ : int = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( _A , _A , unittest.TestCase ):
    """Common model-test suite for the TF ResNet classes (model tester +
    config tester + pipeline mixin).

    NOTE(review): this file's identifiers were machine-obfuscated — every
    method is named ``A`` (later defs shadow earlier ones) and several calls
    pass the undefined name ``A`` where a real argument once stood.
    """

    # Model classes under test and the pipeline mapping used by the pipeline
    # mixin; both empty when TF is not installed.
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE_ : List[Any] = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # ResNet has no attention, no input embeddings, and no head masking, so
    # the corresponding common tests are switched off.
    SCREAMING_SNAKE_CASE_ : Optional[int] = False
    SCREAMING_SNAKE_CASE_ : Optional[int] = False
    SCREAMING_SNAKE_CASE_ : str = False
    SCREAMING_SNAKE_CASE_ : Optional[int] = False
    SCREAMING_SNAKE_CASE_ : Any = False
    def A ( self : Union[str, Any] ) -> List[Any]:
        # setUp: shared model tester + config tester.
        lowercase_ : int = TFResNetModelTester(self )
        lowercase_ : str = ConfigTester(self , config_class=A , has_text_modality=A )
    def A ( self : Dict ) -> Optional[Any]:
        # Run the standard battery of config (de)serialization checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def A ( self : Dict ) -> List[Any]:
        # create_and_test_config_common_properties: intentionally a no-op here.
        return
    @unittest.skip(reason='''ResNet does not use inputs_embeds''' )
    def A ( self : Any ) -> Any:
        pass
    @unittest.skip(reason='''ResNet does not support input and output embeddings''' )
    def A ( self : List[str] ) -> Optional[Any]:
        pass
    def A ( self : str ) -> Tuple:
        # Check that every model's call signature starts with ``pixel_values``.
        lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : int = model_class(A )
            lowercase_ : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : str = [*signature.parameters.keys()]
            lowercase_ : str = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , A )
    def A ( self : List[str] ) -> Tuple:
        lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )
    def A ( self : List[Any] ) -> List[str]:
        # Verify hidden-state outputs for both residual layer types, with the
        # flag passed explicitly and via the config.
        def check_hidden_states_output(A : Union[str, Any] , A : int , A : List[Any] ):
            lowercase_ : int = model_class(A )
            lowercase_ : Optional[Any] = model(**self._prepare_for_class(A , A ) )
            lowercase_ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase_ : Any = self.model_tester.num_stages
            self.assertEqual(len(A ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ : Union[str, Any] = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase_ : List[str] = layer_type
                lowercase_ : Tuple = True
                check_hidden_states_output(A , A , A )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase_ : Optional[Any] = True
                check_hidden_states_output(A , A , A )
    def A ( self : Optional[int] ) -> Tuple:
        lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )
    @slow
    def A ( self : List[str] ) -> Optional[int]:
        # Smoke-test loading the first pretrained checkpoint from the Hub.
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ : Tuple = TFResNetModel.from_pretrained(A )
            self.assertIsNotNone(A )
def lowercase ( ):
    """Load the standard COCO cats test image used by the integration tests.

    Fixes the obfuscation damage in the original: the opened image was bound
    to a throwaway local while the undefined name ``image`` was returned
    (NameError at runtime).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test: checks the logits of the pretrained TF ResNet
    classifier on the standard COCO test image against reference values.

    NOTE(review): obfuscation replaced several real arguments with the
    undefined name ``A`` and collapsed locals into ``lowercase_``; restore
    real names before running.
    """
    @cached_property
    def A ( self : Any ) -> Optional[int]:
        # Default image processor for the first pretrained checkpoint
        # (None when vision deps are unavailable).
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def A ( self : Any ) -> Optional[int]:
        lowercase_ : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        lowercase_ : List[Any] = self.default_image_processor
        lowercase_ : Dict = prepare_img()
        lowercase_ : List[str] = image_processor(images=A , return_tensors='''tf''' )
        # forward pass
        lowercase_ : Tuple = model(**A )
        # verify the logits
        lowercase_ : Optional[int] = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , A )
        # Reference logits slice from the released checkpoint.
        lowercase_ : Optional[Any] = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A , atol=1e-4 ) )
| 33
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# NOTE(review): both statements below bind the same obfuscated name
# ``__snake_case`` — the logger is immediately overwritten by the checkpoint
# map (originally distinct names such as ``logger`` and
# ``FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP``).
__snake_case = logging.get_logger(__name__)
# Map from canonical FocalNet checkpoint name to its hosted config.json.
__snake_case = {
    '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class __lowerCamelCase ( _A , _A ):
    """Configuration class for FocalNet models (image size, patching, focal
    modulation levels/windows, regularization, and backbone stage wiring).
    Mixes in backbone-config support for out_features/out_indices handling."""

    # Model-type key used by AutoConfig.
    A_ : str = "focalnet"
    def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=False , __UpperCAmelCase=[192, 384, 768, 768] , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[3, 3, 3, 3] , __UpperCAmelCase="gelu" , __UpperCAmelCase=4.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=False , __UpperCAmelCase=1e-4 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Union[str, Any]:
        # NOTE(review): all parameters were obfuscated to the same name
        # ``__UpperCAmelCase`` (duplicate argument names are a SyntaxError);
        # the body reveals the intended order: image_size, patch_size,
        # num_channels, embed_dim, use_conv_embed, hidden_sizes, depths,
        # focal_levels, focal_windows, hidden_act, mlp_ratio,
        # hidden_dropout_prob, drop_path_rate, use_layerscale,
        # layerscale_value, use_post_layernorm,
        # use_post_layernorm_in_modulation, normalize_modulator,
        # initializer_range, layer_norm_eps, encoder_stride,
        # out_features, out_indices.
        super().__init__(**__UpperCAmelCase )
        _a = image_size
        _a = patch_size
        _a = num_channels
        _a = embed_dim
        _a = use_conv_embed
        _a = hidden_sizes
        _a = depths
        _a = focal_levels
        _a = focal_windows
        _a = hidden_act
        _a = mlp_ratio
        _a = hidden_dropout_prob
        _a = drop_path_rate
        _a = use_layerscale
        _a = layerscale_value
        _a = use_post_layernorm
        _a = use_post_layernorm_in_modulation
        _a = normalize_modulator
        _a = initializer_range
        _a = layer_norm_eps
        _a = encoder_stride
        # Stage names drive out_features/out_indices alignment below.
        _a = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
        _a = get_aligned_output_features_output_indices(
            out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
| 320
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Wikipedia-style passage used as the shared context for the
# text-question-answering tool tests below (originally named ``TEXT``).
__A : Dict = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class _UpperCAmelCase ( unittest.TestCase , _A ):
    """Tests the ``text-question-answering`` tool, both the local and the
    remote variant, with positional and keyword call styles.

    NOTE(review): the calls below pass the undefined name ``A`` where the
    context passage (originally ``TEXT``) and result variables once stood —
    an obfuscation artifact; restore real names before running.
    """
    def A ( self : List[Any] ) -> Dict:
        # setUp: load the local tool and its remote counterpart.
        lowercase_ : Optional[int] = load_tool('''text-question-answering''' )
        self.tool.setup()
        lowercase_ : Union[str, Any] = load_tool('''text-question-answering''' , remote=A )
    def A ( self : Any ) -> List[str]:
        # Local tool, positional arguments.
        lowercase_ : Union[str, Any] = self.tool(A , '''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )
    def A ( self : str ) -> List[str]:
        # Remote tool, positional arguments.
        lowercase_ : int = self.remote_tool(A , '''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )
    def A ( self : List[Any] ) -> int:
        # Local tool, keyword arguments.
        lowercase_ : Optional[Any] = self.tool(text=A , question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )
    def A ( self : List[str] ) -> Optional[int]:
        # Remote tool, keyword arguments.
        lowercase_ : int = self.remote_tool(text=A , question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )
| 33
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests comparing XLM-RoBERTa hidden states on a fixed
    sentence against reference values from the original fairseq model.

    NOTE(review): both methods are named ``snake_case`` after obfuscation,
    so the second definition shadows the first inside the class.
    """
    @slow
    def snake_case ( self : Union[str, Any] ):
        """Check ``xlm-roberta-base`` last-hidden-state shape and a slice of
        its values against fairseq reference numbers."""
        __lowercase =XLMRobertaModel.from_pretrained('xlm-roberta-base' )
        __lowercase =torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
        # The dog is cute and lives in the garden house
        __lowercase =torch.Size((1, 12, 768) )  # batch_size, sequence_length, embedding_vector_dim
        __lowercase =torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            __lowercase =model(__lowercase )['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , __lowercase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , __lowercase , atol=1E-3 ) )
    @slow
    def snake_case ( self : Optional[int] ):
        """Same check for ``xlm-roberta-large`` (hidden size 1024)."""
        __lowercase =XLMRobertaModel.from_pretrained('xlm-roberta-large' )
        __lowercase =torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
        # The dog is cute and lives in the garden house
        __lowercase =torch.Size((1, 12, 1024) )  # batch_size, sequence_length, embedding_vector_dim
        __lowercase =torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            __lowercase =model(__lowercase )['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , __lowercase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , __lowercase , atol=1E-3 ) )
| 141
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _UpperCAmelCase ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    """Formatter that converts Arrow rows/columns/batches into PyTorch
    tensors, recursing through nested lists/dicts and stacking homogeneous
    columns.

    NOTE(review): obfuscation damaged several names here — e.g.
    ``torch.intaa`` / ``torch.floataa`` below are presumably ``torch.int64``
    / ``torch.float32``, and locals assigned to ``lowercase_`` lose their
    intended bindings. Restore before running.
    """
    def __init__( self : Any , A : int=None , **A : str ) -> Union[str, Any]:
        super().__init__(features=A )
        # Extra keyword arguments forwarded to ``torch.tensor``.
        lowercase_ : Union[str, Any] = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def A ( self : Dict , A : int ) -> List[Any]:
        # _consolidate: stack a column of same-shape/same-dtype tensors into
        # one tensor; otherwise return the column unchanged.
        import torch
        if isinstance(A , A ) and column:
            if all(
                isinstance(A , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(A )
        return column
    def A ( self : int , A : Any ) -> Optional[Any]:
        # _tensorize: convert one leaf value to a torch tensor, choosing a
        # default dtype for integer/float numpy inputs and decoding PIL images.
        import torch
        if isinstance(A , (str, bytes, type(A )) ):
            return value
        elif isinstance(A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        lowercase_ : Any = {}
        if isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            lowercase_ : Any = {'''dtype''': torch.intaa}
        elif isinstance(A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            lowercase_ : Dict = {'''dtype''': torch.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(A , PIL.Image.Image ):
                lowercase_ : Dict = np.asarray(A )
        return torch.tensor(A , **{**default_dtype, **self.torch_tensor_kwargs} )
    def A ( self : Union[str, Any] , A : Optional[int] ) -> str:
        # _recursive_tensorize: walk arrays / lists / tuples and tensorize leaves.
        import torch
        # support for torch, tf, jax etc.
        if hasattr(A , '''__array__''' ) and not isinstance(A , torch.Tensor ):
            lowercase_ : Optional[int] = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(A , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
        elif isinstance(A , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(A ) for substruct in data_struct] )
        return self._tensorize(A )
    def A ( self : Dict , A : dict ) -> Tuple:
        # recursive_tensorize: map the recursive conversion over nested structures.
        return map_nested(self._recursive_tensorize , A , map_list=A )
    def A ( self : str , A : pa.Table ) -> Mapping:
        # format_row: Arrow table -> decoded python row -> tensors.
        lowercase_ : Optional[Any] = self.numpy_arrow_extractor().extract_row(A )
        lowercase_ : str = self.python_features_decoder.decode_row(A )
        return self.recursive_tensorize(A )
    def A ( self : List[Any] , A : pa.Table ) -> "torch.Tensor":
        # format_column: Arrow table -> decoded column -> consolidated tensor.
        lowercase_ : List[str] = self.numpy_arrow_extractor().extract_column(A )
        lowercase_ : str = self.python_features_decoder.decode_column(A , pa_table.column_names[0] )
        lowercase_ : Optional[int] = self.recursive_tensorize(A )
        lowercase_ : Any = self._consolidate(A )
        return column
    def A ( self : List[str] , A : pa.Table ) -> Mapping:
        # format_batch: Arrow table -> decoded batch -> per-column tensors.
        lowercase_ : Optional[int] = self.numpy_arrow_extractor().extract_batch(A )
        lowercase_ : int = self.python_features_decoder.decode_batch(A )
        lowercase_ : Dict = self.recursive_tensorize(A )
        for column_name in batch:
            lowercase_ : Optional[Any] = self._consolidate(batch[column_name] )
        return batch
| 33
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the NLLB tokenizers: the slow (sentencepiece)
# and fast (tokenizers) variants are only registered when their backend
# dependency is installed.
# NOTE(review): all three dict/list bindings below were obfuscated to
# ``__snake_case``, so the tokenizer lists overwrite the import structure —
# originally these were ``_import_structure`` plus its sub-entries.
__snake_case : Union[str, Any] = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : Dict = ['''NllbTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : Dict = ['''NllbTokenizerFast''']
# Under static type checking, import the real symbols eagerly; at runtime,
# replace the module with a _LazyModule that resolves them on first access.
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys
    __snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 269
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for LXMERT: config + slow tokenizer are always
# available; the fast tokenizer, the PyTorch models, and the TF models are
# each registered only when their backend is installed.
# NOTE(review): the bindings below were obfuscated to ``lowercase``, so each
# assignment overwrites the previous — originally this was a single
# ``_import_structure`` dict extended in place.
lowercase : Dict = {
    '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
    '''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : int = ['''LxmertTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Any = [
        '''LxmertEncoder''',
        '''LxmertForPreTraining''',
        '''LxmertForQuestionAnswering''',
        '''LxmertModel''',
        '''LxmertPreTrainedModel''',
        '''LxmertVisualFeatureEncoder''',
        '''LxmertXLayer''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Any = [
        '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFLxmertForPreTraining''',
        '''TFLxmertMainLayer''',
        '''TFLxmertModel''',
        '''TFLxmertPreTrainedModel''',
        '''TFLxmertVisualFeatureEncoder''',
    ]
# Under static type checking, import the real symbols eagerly; at runtime,
# replace the module with a _LazyModule that resolves them on first access.
if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )
else:
    import sys
    lowercase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 99
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# NOTE(review): every constant in this section is bound to the same
# obfuscated name ``__A``, so each assignment overwrites the previous —
# originally distinct names (logger, _CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC,
# _EXPECTED_OUTPUT_SHAPE, _IMAGE_CLASS_CHECKPOINT,
# _IMAGE_CLASS_EXPECTED_OUTPUT, MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST).
__A : Union[str, Any] = logging.get_logger(__name__)
# General docstring
__A : Tuple = '''MobileNetV1Config'''
# Base docstring
__A : Union[str, Any] = '''google/mobilenet_v1_1.0_224'''
__A : Union[str, Any] = [1, 1_024, 7, 7]
# Image classification docstring
__A : Optional[Any] = '''google/mobilenet_v1_1.0_224'''
__A : List[Any] = '''tabby, tabby cat'''
__A : Union[str, Any] = [
    '''google/mobilenet_v1_1.0_224''',
    '''google/mobilenet_v1_0.75_192''',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowercase ( __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Dict=None ):
    """Build the TF-variable-name -> PyTorch-parameter map for MobileNetV1
    (conv stem, 13 depthwise/pointwise block pairs, optional classifier).

    NOTE(review): obfuscation destroyed this function — the duplicate
    parameter names ``__snake_case`` are a SyntaxError, every map entry
    (originally ``tf_to_pt_map[prefix + '...'] = <param>``) collapsed into a
    rebinding of ``lowercase_``, and the returned name ``tf_to_pt_map`` is
    never defined. Compare against
    ``transformers.models.mobilenet_v1.modeling_mobilenet_v1`` to restore.
    """
    lowercase_ : str = {}
    # Unwrap a classification model down to its backbone.
    if isinstance(__snake_case , __snake_case ):
        lowercase_ : Union[str, Any] = model.mobilenet_va
    else:
        lowercase_ : Optional[Any] = model
    # Conv stem weights + batch-norm statistics.
    lowercase_ : Union[str, Any] = '''MobilenetV1/Conv2d_0/'''
    lowercase_ : Union[str, Any] = backbone.conv_stem.convolution.weight
    lowercase_ : Optional[Any] = backbone.conv_stem.normalization.bias
    lowercase_ : Union[str, Any] = backbone.conv_stem.normalization.weight
    lowercase_ : Any = backbone.conv_stem.normalization.running_mean
    lowercase_ : int = backbone.conv_stem.normalization.running_var
    # Each TF block index i maps to a depthwise layer (2i) and a pointwise
    # layer (2i + 1) in the PyTorch ModuleList.
    for i in range(1_3 ):
        lowercase_ : Optional[int] = i + 1
        lowercase_ : Union[str, Any] = i * 2
        lowercase_ : Optional[Any] = backbone.layer[pt_index]
        lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        lowercase_ : str = pointer.convolution.weight
        lowercase_ : int = pointer.normalization.bias
        lowercase_ : Any = pointer.normalization.weight
        lowercase_ : Dict = pointer.normalization.running_mean
        lowercase_ : Union[str, Any] = pointer.normalization.running_var
        lowercase_ : Any = backbone.layer[pt_index + 1]
        lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        lowercase_ : int = pointer.convolution.weight
        lowercase_ : str = pointer.normalization.bias
        lowercase_ : Tuple = pointer.normalization.weight
        lowercase_ : Dict = pointer.normalization.running_mean
        lowercase_ : Any = pointer.normalization.running_var
    # Classifier head, only present on the classification model.
    if isinstance(__snake_case , __snake_case ):
        lowercase_ : Optional[Any] = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
        lowercase_ : Any = model.classifier.weight
        lowercase_ : Optional[int] = model.classifier.bias
    return tf_to_pt_map
def lowercase ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Dict ):
    """Load MobileNetV1 weights from a TF checkpoint into a PyTorch model.

    Reads every variable from the checkpoint, maps it to the matching
    PyTorch parameter via the map-builder above, transposes conv kernels
    from TF to PyTorch layout, and drops optimizer/EMA slots.

    NOTE(review): the duplicate ``__snake_case`` parameter names are a
    SyntaxError and several locals reference names (``model``,
    ``tf_checkpoint_path``, ``init_vars``, ``tf_weights``, ``array``) that
    the obfuscated ``lowercase_`` rebindings never define — restore the
    real identifiers before running.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.''' )
        raise
    # Load weights from TF model
    lowercase_ : Tuple = tf.train.list_variables(__snake_case )
    lowercase_ : int = {}
    for name, shape in init_vars:
        logger.info(F'''Loading TF weight {name} with shape {shape}''' )
        lowercase_ : Optional[Any] = tf.train.load_variable(__snake_case , __snake_case )
        lowercase_ : Optional[int] = array
    # Build TF to PyTorch weights loading map
    lowercase_ : Any = _build_tf_to_pytorch_map(__snake_case , __snake_case , __snake_case )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F'''Importing {name}''' )
        if name not in tf_weights:
            logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
            continue
        lowercase_ : Union[str, Any] = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''' )
            # TF depthwise kernels are HWIO-multiplier; PyTorch wants OIHW.
            lowercase_ : Any = np.transpose(__snake_case , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('''Transposing''' )
            if len(pointer.shape ) == 2:  # copying into linear layer
                lowercase_ : Optional[int] = array.squeeze().transpose()
            else:
                lowercase_ : Optional[int] = np.transpose(__snake_case , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
        logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
        lowercase_ : str = torch.from_numpy(__snake_case )
        # Remove the variable and its optimizer/EMA companions so leftovers
        # can be reported below.
        tf_weights.pop(__snake_case , __snake_case )
        tf_weights.pop(name + '''/RMSProp''' , __snake_case )
        tf_weights.pop(name + '''/RMSProp_1''' , __snake_case )
        tf_weights.pop(name + '''/ExponentialMovingAverage''' , __snake_case )
    logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
    return model
def lowercase ( features : torch.Tensor , conv_layer : nn.Module ) -> torch.Tensor:
    """Pad ``features`` the way TensorFlow's "SAME" padding would for
    ``conv_layer``.

    TF distributes padding asymmetrically (the extra pixel goes on the
    bottom/right), whereas PyTorch's ``Conv2d`` ``padding=`` is symmetric,
    so the padding has to be applied explicitly before the convolution.

    Fixes the obfuscation damage in the original: both parameters were named
    ``__snake_case`` (a SyntaxError) and the body referenced ``features``,
    ``conv_layer`` and ``pad_along_height/width`` that the ``lowercase_``
    rebindings never defined.

    Args:
        features: input of shape ``(..., height, width)``.
        conv_layer: any object exposing 2-tuple ``stride`` and
            ``kernel_size`` attributes (an ``nn.Conv2d`` in practice).

    Returns:
        ``features`` zero-padded so the following strided convolution yields
        ``ceil(size / stride)`` outputs per spatial dimension.
    """
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size

    # Total padding per axis, per the TF "SAME" formula.
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )

    # Split asymmetrically: the extra pixel goes to the right/bottom.
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , '''constant''' , 0.0 )
class _UpperCAmelCase ( nn.Module ):
    """Conv + optional BatchNorm + optional activation building block for
    MobileNetV1, with optional TF-style "SAME" padding applied in forward.

    NOTE(review): obfuscation broke names in this class — the duplicate
    parameters ``A`` are a SyntaxError, and ``nn.Convad`` /
    ``nn.BatchNormad`` are presumably ``nn.Conv2d`` / ``nn.BatchNorm2d``.
    """
    def __init__( self : List[Any] , A : MobileNetVaConfig , A : int , A : int , A : int , A : Optional[int] = 1 , A : Optional[int] = 1 , A : bool = False , A : Optional[bool] = True , A : Optional[bool or str] = True , ) -> None:
        super().__init__()
        lowercase_ : int = config
        # Grouped convolutions require channel counts divisible by ``groups``.
        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
        # With TF padding the conv itself is unpadded; padding happens in forward.
        lowercase_ : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        lowercase_ : int = nn.Convad(
            in_channels=A , out_channels=A , kernel_size=A , stride=A , padding=A , groups=A , bias=A , padding_mode='''zeros''' , )
        if use_normalization:
            lowercase_ : Optional[Any] = nn.BatchNormad(
                num_features=A , eps=config.layer_norm_eps , momentum=0.9997 , affine=A , track_running_stats=A , )
        else:
            lowercase_ : Union[str, Any] = None
        # Activation may be given as a string (ACT2FN key), taken from the
        # config, or disabled entirely.
        if use_activation:
            if isinstance(A , A ):
                lowercase_ : str = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , A ):
                lowercase_ : Any = ACTaFN[config.hidden_act]
            else:
                lowercase_ : Tuple = config.hidden_act
        else:
            lowercase_ : Tuple = None
    def A ( self : str , A : torch.Tensor ) -> torch.Tensor:
        # forward: optional TF "SAME" padding -> conv -> norm -> activation.
        if self.config.tf_padding:
            lowercase_ : List[Any] = apply_tf_padding(A , self.convolution )
        lowercase_ : Optional[int] = self.convolution(A )
        if self.normalization is not None:
            lowercase_ : Union[str, Any] = self.normalization(A )
        if self.activation is not None:
            lowercase_ : Optional[int] = self.activation(A )
        return features
class _UpperCAmelCase ( _A ):
    """PreTrainedModel base for MobileNetV1: wires the config class, the
    TF-checkpoint weight loader, the base-model prefix, and weight init."""
    SCREAMING_SNAKE_CASE_ : Optional[int] = MobileNetVaConfig
    # NOTE(review): ``load_tf_weights_in_mobilenet_va`` is not defined under
    # that name in this file after obfuscation (the loader above is named
    # ``lowercase``).
    SCREAMING_SNAKE_CASE_ : int = load_tf_weights_in_mobilenet_va
    SCREAMING_SNAKE_CASE_ : Optional[Any] = "mobilenet_v1"
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = "pixel_values"
    SCREAMING_SNAKE_CASE_ : List[str] = False
    def A ( self : Any , A : Union[nn.Linear, nn.Convad] ) -> None:
        # _init_weights: normal init for linear/conv, standard BN init.
        if isinstance(A , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(A , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
module.weight.data.fill_(1.0 )
__A : Union[str, Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__A : List[str] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , _A , )
class _UpperCAmelCase ( _A ):
    """MobileNetV1 backbone: conv stem, 13 depthwise-separable block pairs
    (depthwise 3x3 conv followed by pointwise 1x1 conv), and an optional
    global average pool.

    NOTE(review): obfuscated — duplicate ``A`` parameters are a SyntaxError,
    attribute assignments collapsed into ``lowercase_`` rebindings, and
    ``nn.AdaptiveAvgPoolad`` is presumably ``nn.AdaptiveAvgPool2d``.
    """
    def __init__( self : str , A : MobileNetVaConfig , A : bool = True ) -> int:
        super().__init__(A )
        lowercase_ : Union[str, Any] = config
        lowercase_ : List[str] = 32
        lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
        lowercase_ : Union[str, Any] = MobileNetVaConvLayer(
            A , in_channels=config.num_channels , out_channels=A , kernel_size=3 , stride=2 , )
        # Stride pattern for the 13 blocks; stride-2 blocks halve the spatial
        # resolution and double the channel depth.
        lowercase_ : Optional[Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        lowercase_ : List[Any] = nn.ModuleList()
        for i in range(13 ):
            lowercase_ : Dict = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
            # Depthwise 3x3 (groups == channels) then pointwise 1x1.
            self.layer.append(
                MobileNetVaConvLayer(
                    A , in_channels=A , out_channels=A , kernel_size=3 , stride=strides[i] , groups=A , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    A , in_channels=A , out_channels=A , kernel_size=1 , ) )
        lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def A ( self : Any , A : Optional[Any] ) -> Optional[int]:
        # Head pruning is not supported for MobileNetV1.
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(A )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def A ( self : List[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        # forward: stem -> blocks (optionally collecting hidden states) ->
        # flattened pooled output, returned as tuple or ModelOutput.
        lowercase_ : str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )
        lowercase_ : List[str] = self.conv_stem(A )
        lowercase_ : Dict = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            lowercase_ : Optional[int] = layer_module(A )
            if output_hidden_states:
                lowercase_ : str = all_hidden_states + (hidden_states,)
        lowercase_ : Tuple = hidden_states
        if self.pooler is not None:
            lowercase_ : Dict = torch.flatten(self.pooler(A ) , start_dim=1 )
        else:
            lowercase_ : Optional[Any] = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=A , pooler_output=A , hidden_states=A , )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , _A , )
class _UpperCAmelCase ( _A ):
    """MobileNetV1 with a linear classification head on the pooled features,
    computing the appropriate loss (MSE / cross-entropy / BCE) from the
    config's ``problem_type``.

    NOTE(review): obfuscated — duplicate ``A`` parameters are a SyntaxError
    and attribute assignments collapsed into ``lowercase_`` rebindings.
    """
    def __init__( self : List[str] , A : MobileNetVaConfig ) -> None:
        super().__init__(A )
        lowercase_ : int = config.num_labels
        lowercase_ : List[str] = MobileNetVaModel(A )
        # Head input width = channel count of the backbone's last conv layer.
        lowercase_ : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        lowercase_ : Tuple = nn.Dropout(config.classifier_dropout_prob , inplace=A )
        lowercase_ : int = nn.Linear(A , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(A )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def A ( self : Optional[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        # forward: backbone -> dropout -> linear head; infer problem_type
        # from num_labels / label dtype when not set on the config.
        lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        lowercase_ : List[Any] = self.mobilenet_va(A , output_hidden_states=A , return_dict=A )
        lowercase_ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
        lowercase_ : Dict = self.classifier(self.dropout(A ) )
        lowercase_ : int = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    lowercase_ : List[str] = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    lowercase_ : Optional[Any] = '''single_label_classification'''
                else:
                    lowercase_ : Tuple = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                lowercase_ : str = MSELoss()
                if self.num_labels == 1:
                    lowercase_ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    lowercase_ : List[str] = loss_fct(A , A )
            elif self.config.problem_type == "single_label_classification":
                lowercase_ : List[Any] = CrossEntropyLoss()
                lowercase_ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                lowercase_ : str = BCEWithLogitsLoss()
                lowercase_ : List[Any] = loss_fct(A , A )
        if not return_dict:
            lowercase_ : Tuple = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=A , logits=A , hidden_states=outputs.hidden_states , )
| 33
| 0
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of items from ``array`` that sum to ``target``.

    Plain recursion (exponential time). ``n`` is the length of ``array`` and is
    kept for signature parity with the DP variants; the caller at module bottom
    invokes this as ``combination_sum_iv(n, array, target)``.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to ``target`` using top-down memoization.

    ``dp_array[t]`` caches the answer for sub-target ``t`` (-1 = not computed).
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to ``target`` with bottom-up DP.

    ``n`` must equal ``len(array)``. Runs in O(n * target) time, O(target) space.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach sum 0: pick nothing
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: 9 ordered combinations of [1, 2, 5] sum to 5.
    # (Obfuscation bound these to `SCREAMING_SNAKE_CASE__`; the call reads the names below.)
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 325
|
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort ``numbers`` in place ascending with exchange sort and return it.

    O(n^2) comparisons; the caller at module bottom invokes this as
    ``exchange_sort(unsorted)`` (the obfuscated name `lowercase` broke that call).
    """
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                # Swap out-of-order pair (the obfuscated source bound the swap
                # result to a throwaway name, leaving the list unsorted).
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    # Read a comma-separated list from stdin, sort it and print the result.
    # (Obfuscation bound both values to `__A` while the code below read
    # `user_input` / `unsorted`; restored.)
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
| 33
| 0
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument(
        """--txt2img_unclip""",
        default="""kakaobrain/karlo-v1-alpha""",
        type=str,
        required=False,
        help="""The pretrained txt2img unclip.""",
    )
    args = parser.parse_args()
    # Fix: the original read `args.txtaimg_unclip`, but argparse exposes the
    # `--txt2img_unclip` flag as `args.txt2img_unclip`.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
    # Reuse the txt2img components to assemble an image-variation pipeline.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
| 197
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """Fixture builder for YOLOS image-processor tests.

    Renamed from the obfuscated `_UpperCAmelCase`: the test class below
    instantiates it by the name ``YolosImageProcessingTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return kwargs suitable for constructing a YolosImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should resize inputs to.

        Mirrors the shortest-edge resize rule; for batched inputs returns the
        per-axis maxima over the individual expected sizes.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for YolosImageProcessor: resize/normalize/pad paths and COCO annotation encoding.

    De-obfuscated: method names restored from the attributes/methods the bodies
    read, and the mixin base restored from the file's own import of
    ``ImageProcessingSavingTestMixin``.
    """

    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4))

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 33
| 0
|
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid (Project Euler 15).

    Equals the central binomial coefficient C(2n, n).
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        # No argument: report the default 20x20 grid.
        print(solution(20))
    else:
        try:
            # (Obfuscation bound this to `lowerCAmelCase__` while the call read `n`.)
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
| 104
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of squares of 1..n.

    Project Euler problem 6. Renamed from the obfuscated `lowercase`: the
    module's ``__main__`` guard calls ``solution()``.
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
    # Print the result for the default upper bound using the f-string "=" debug spec.
    print(F"""{solution() = }""")
| 33
| 0
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    """Fast unit tests for the safe Stable Diffusion pipeline using tiny dummy models.

    Renamed from the obfuscated `snake_case__`, which collided with (and was
    shadowed by) the integration-test class below.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        """A deterministic random 1x3x32x32 image tensor on the test device."""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37,
            layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
            pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        """A stand-in feature extractor returning an empty pixel_values holder."""

        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False, )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2,
            output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2,
            output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests for safe latent diffusion (SLD) guidance.

    Renamed from the obfuscated `snake_case__` to avoid shadowing the fast-test
    class of the same obfuscated name.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        # Default pipeline (safety checker enabled) — unsafe output is blacked out.
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 107
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )
    args = parser.parse_args()
    # Fix: the original read `args.txtaimg_unclip`, but argparse exposes the
    # `--txt2img_unclip` flag as `args.txt2img_unclip`.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    # Reuse the txt2img components to assemble an image-variation pipeline.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
| 33
| 0
|
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """Convert a TAPAS TensorFlow checkpoint to a PyTorch model + tokenizer.

    Args:
        task: one of "SQA", "WTQ", "WIKISQL_SUPERVISED", "TABFACT", "MLM",
            "INTERMEDIATE_PRETRAINING"; selects the model head and hparams.
        reset_position_index_per_cell: whether to use relative position
            embeddings (set False for checkpoints with absolute positions).
        tf_checkpoint_path: path to the TF checkpoint (ends in ``model.ckpt``).
        tapas_config_file: JSON config describing the architecture.
        pytorch_dump_path: output directory for the converted model.

    Raises:
        ValueError: if ``task`` is not one of the supported tasks.
    """
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F'''Task {task} not supported.''')

    print(F'''Building PyTorch model from configuration: {config}''')

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files; the vocab lives next to the checkpoint
    # (strip the trailing "model.ckpt", 10 characters).
    print(F'''Save tokenizer files to {pytorch_dump_path}''')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
    )
    parser.add_argument(
        '''--reset_position_index_per_cell''',
        default=False,
        action='''store_true''',
        help='''Whether to use relative position embeddings or not. Defaults to True.''',
    )
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--tapas_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained TAPAS model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
| 162
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU) tests for the Kandinsky 2.2 controlnet img2img pipeline."""

    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _UpperCAmelCase(unittest.TestCase):
    """Slow GPU integration test for the Kandinsky 2.2 controlnet img2img pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''')

        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        init_image = init_image.resize((512, 512))

        hint = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''')
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = '''A robot, 4k photo'''

        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='''cpu''').manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt='''''', ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type='''np''',
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 33
| 0
|
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum of a contiguous subarray (Kadane's algorithm).

    Args:
        arr: the input sequence of numbers.
        allow_empty_subarrays: when True, the empty subarray (sum 0) is a
            valid answer, so all-negative inputs yield 0.

    Returns:
        The best subarray sum; 0 for an empty input.

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6.0
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart at the current element
        # (restarting at 0 is allowed only when empty subarrays count).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


# Backward-compatible alias for the previous name of this function.
a__ = max_subarray_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 291
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term containing ``n`` digits.

    Walks the Fibonacci sequence (F1 = F2 = 1, so the walk starts at index 2)
    and stops as soon as a term has exactly ``n`` digits (Project Euler 25).
    """
    fib_prev, fib_curr = 1, 1
    index = 2
    while True:
        fib_next = fib_prev + fib_curr
        fib_prev, fib_curr = fib_curr, fib_next
        index += 1
        # Digit counts grow by at most one per term, so ``n`` is always hit.
        if len(str(fib_next)) == n:
            break
    return index


# Backward-compatible alias for the previous name of this function.
lowercase = solution


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 33
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """Build the kwargs dict fed to TFOPT models in the tests.

    When no attention mask is given, derive one by masking out the padding
    token positions of ``input_ids``.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _lowercase:
    """Builds tiny OPT configs and inputs shared by the TF model tests."""

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        # OPT is decoder-only.
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # Append an EOS column so every row terminates.
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


# Descriptive alias used by the test classes below.
TFOPTModelTester = _lowercase
@require_tf
class _lowercase(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-mixin tests for TFOPTModel / TFOPTForCausalLM."""

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    """Wrap a nested list of token ids in an int32 TF constant."""
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class _lowercase(unittest.TestCase):
    """Builds a tiny OPT config plus matching input ids for head tests."""

    vocab_size = 99

    def _get_config_and_data(self):
        # Last column is all EOS so every sequence terminates.
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        # Shift random ids by 3 to stay clear of the special tokens (0, 1, 2).
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _lowercase(unittest.TestCase):
    """Integration test: TFOPTModel hidden states match the reference values."""

    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice computed with the original implementation.
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        # XLA-compiled forward pass must agree (looser tolerance).
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class _lowercase(unittest.TestCase):
    """Checks TFOPT logits against the Metaseq reference values."""

    def setUp(self):
        super().setUp()
        self.path_model = '''facebook/opt-350m'''

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model)

        prompts = [
            '''Today is a beautiful day and I want to''',
            '''In the city of''',
            '''Paris is the capital of France and''',
            '''Computers and mobile phones have taken''',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        # The XLA-compiled model must produce the same logits.
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class _lowercase(unittest.TestCase):
    """Greedy-generation integration tests for TFOPTForCausalLM."""

    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = '''facebook/opt-125m'''

        expected_outputs = [
            '''Today is a beautiful day and I want to''',
            '''In the city of New York, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        predicted_outputs = []

        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, expected_outputs)

    def test_batch_generation(self):
        model_id = '''facebook/opt-350m'''

        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = '''left'''

        # use different length sentences to test batching
        sentences = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs['''input_ids''']

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        # Shorten the padded run by the number of pad positions in the batch.
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            '''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
            '''Today, I was in the middle of a conversation with a friend about the''',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = '''facebook/opt-350m'''

        expected_outputs = [
            '''Today is a beautiful day and I want to''',
            '''In the city of San Francisco, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        predicted_outputs = []

        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, expected_outputs)
| 227
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _UpperCAmelCase(PretrainedConfig):
    r"""Configuration for a ViT-MAE model (encoder + lightweight decoder).

    Defaults reproduce a ``facebook/vit-mae-base``-style architecture; any
    extra keyword arguments are forwarded to :class:`PretrainedConfig`.
    """

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Decoder hyper-parameters (used only during MAE pre-training).
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        # Fraction of patches masked out; loss normalization option.
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 33
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}

# The modeling module is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_nezha'''] = [
        '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''NezhaForNextSentencePrediction''',
        '''NezhaForMaskedLM''',
        '''NezhaForPreTraining''',
        '''NezhaForMultipleChoice''',
        '''NezhaForQuestionAnswering''',
        '''NezhaForSequenceClassification''',
        '''NezhaForTokenClassification''',
        '''NezhaModel''',
        '''NezhaPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320
|
"""simple docstring"""
def lowercase ( __snake_case : int ):
if n == 1 or not isinstance(__snake_case , __snake_case ):
return 0
elif n == 2:
return 1
else:
lowercase_ : Dict = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def lowercase ( __snake_case : int ):
lowercase_ : str = 0
lowercase_ : List[str] = 2
while digits < n:
index += 1
lowercase_ : Any = len(str(fibonacci(__snake_case ) ) )
return index
def lowercase ( __snake_case : int = 1_0_0_0 ):
return fibonacci_digits_index(__snake_case )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 33
| 0
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCAmelCase = get_tests_dir('''fixtures''')
class lowerCAmelCase(unittest.TestCase):
    """Loading-behavior tests for image processors (caching, URLs, subfolders)."""

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP HEAD request to emulate server down.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # Loading directly from a config URL (legacy behavior) must still work.
        _ = ViTImageProcessor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json')

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')

        config = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor')
        self.assertIsNotNone(config)
@is_staging_test
class lowerCAmelCase ( unittest.TestCase ):
@classmethod
def snake_case ( cls : Union[str, Any] ):
"""simple docstring"""
__lowercase =TOKEN
HfFolder.save_token(__lowercase )
@classmethod
def snake_case ( cls : Any ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def snake_case ( self : List[str] ):
"""simple docstring"""
__lowercase =ViTImageProcessor.from_pretrained(__lowercase )
image_processor.push_to_hub('test-image-processor' , use_auth_token=self._token )
__lowercase =ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__lowercase , repo_id='test-image-processor' , push_to_hub=__lowercase , use_auth_token=self._token )
__lowercase =ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) )
def snake_case ( self : str ):
"""simple docstring"""
__lowercase =ViTImageProcessor.from_pretrained(__lowercase )
image_processor.push_to_hub('valid_org/test-image-processor' , use_auth_token=self._token )
__lowercase =ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__lowercase , repo_id='valid_org/test-image-processor-org' , push_to_hub=__lowercase , use_auth_token=self._token )
__lowercase =ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) )
def snake_case ( self : Union[str, Any] ):
    """Push a *custom* image-processor class and check AutoImageProcessor can load
    it back via ``trust_remote_code`` (dynamic-module path).

    NOTE(review): mangled — `__lowercase` is read before assignment;
    `image_processor` / `new_image_processor` are never the names bound.
    """
    CustomImageProcessor.register_for_auto_class()
    __lowercase =CustomImageProcessor.from_pretrained(__lowercase )
    image_processor.push_to_hub('test-dynamic-image-processor' , use_auth_token=self._token )
    # This has added the proper auto_map field to the config
    self.assertDictEqual(
        image_processor.auto_map , {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'} , )
    __lowercase =AutoImageProcessor.from_pretrained(
        f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__lowercase )
    # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
    self.assertEqual(new_image_processor.__class__.__name__ , 'CustomImageProcessor' )
| 141
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A : List[str] = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''MobileNetV2FeatureExtractor''']
__A : Optional[int] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
# URL template for fetching community pipelines from the diffusers GitHub repo.
# NOTE(review): mangled — the code below refers to COMMUNITY_PIPELINES_URL and
# logger, but both constants are bound to `__snake_case` here (second clobbers first).
__snake_case : Tuple = (
    '''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
__snake_case : int = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _lowercase() -> List[str]:
    """Return every diffusers version published on PyPI, sorted oldest -> newest.

    Bug fix: the mangled source passed the undefined name ``__snake_case`` to
    ``request.urlopen`` and ``sorted`` instead of the URL / release list.
    """
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    # Sort semantically (0.10.0 after 0.9.0), not lexicographically.
    return sorted(releases, key=lambda ver: version.Version(ver))
def _lowercase() -> None:
    """Ensure the HF dynamic-modules cache is on sys.path and is a package.

    Bug fix: the mangled source used the undefined ``__snake_case`` where the
    ``HF_MODULES_CACHE`` constant belongs.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def _lowercase(__snake_case):
    """Create (recursively) the dynamic module *__snake_case* under HF_MODULES_CACHE.

    Bug fix: the mangled source built the path from the module name instead of
    HF_MODULES_CACHE and passed the path as ``exist_ok``.
    NOTE(review): ``init_hf_modules`` / ``create_dynamic_module`` are module-level
    siblings whose definitions were also mangled to ``_lowercase`` in this file.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / __snake_case
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def _lowercase ( __snake_case ) -> Dict:
with open(__snake_case ,"r" ,encoding="utf-8" ) as f:
__lowerCAmelCase : int = f.read()
# Imports of the form `import .xxx`
__lowerCAmelCase : List[Any] = re.findall("^\s*import\s+\.(\S+)\s*$" ,__snake_case ,flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" ,__snake_case ,flags=re.MULTILINE )
# Unique-ify
return list(set(__snake_case ) )
def _lowercase(__snake_case):
    """Return the transitive closure of relative-import *files* of *__snake_case*.

    Repeatedly scans each newly discovered file for further relative imports until
    a fixed point is reached.
    Bug fix: the mangled source scanned the top-level file on every pass (never the
    discovered files) and extended the result with the wrong list.
    NOTE(review): relies on the sibling ``get_relative_imports`` (mangled to
    ``_lowercase`` at module level).
    """
    no_change = False
    files_to_check = [__snake_case]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(__snake_case).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def _lowercase(__snake_case):
    """Verify the top-level (absolute) imports of module file *__snake_case* are
    installed, raising ImportError listing any missing packages; return its
    relative imports.

    Bug fix: the mangled source ran the regexes and ``importlib.import_module``
    on the *filename* instead of the file contents / import names.
    NOTE(review): relies on the sibling ``get_relative_imports``.
    """
    with open(__snake_case, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"""{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`""")
    return get_relative_imports(__snake_case)
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
__lowerCAmelCase : Tuple = module_path.replace(os.path.sep ,"." )
__lowerCAmelCase : List[Any] = importlib.import_module(__snake_case )
if class_name is None:
return find_pipeline_class(__snake_case )
return getattr(__snake_case ,__snake_case )
def _lowercase(loaded_module):
    """Return the unique user-defined DiffusionPipeline subclass in *loaded_module*.

    Raises ValueError if more than one candidate is found; returns None if none.
    Bug fix: the mangled source passed undefined names to ``inspect.getmembers``
    and ``issubclass`` and never bound ``pipeline_class``.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    f""" {loaded_module}.""")
            pipeline_class = cls
    return pipeline_class
def _lowercase(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Resolve *module_file* (local path, GitHub community pipeline, or Hub repo),
    copy it plus its relative imports into the dynamic-modules cache, and return
    its path inside that cache.

    NOTE(review): reconstructed — the mangled source declared all nine parameters
    as ``__snake_case`` (duplicate arguments, a SyntaxError). Module-level helper
    names (get_diffusers_versions, check_imports, create_dynamic_module,
    COMMUNITY_PIPELINES_URL, logger) were mangled too and are referenced here by
    their canonical names.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        # A bare name means a community pipeline hosted on the diffusers GitHub repo.
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve the github revision that matches the installed library version
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"""Defaulting to latest_version: {revision}.""")
        elif revision in available_versions:
            revision = f"""v{revision}"""
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                f""" {', '.join(available_versions + ['main'])}.""")
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"""{module_needed}.py"""
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"""{module_needed}.py""",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def _lowercase(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Download/cache *module_file* for *pretrained_model_name_or_path* and return
    the requested class from it (auto-detected when *class_name* is None).

    Bug fix: the mangled source declared every parameter as ``__snake_case``
    (duplicate arguments, a SyntaxError) and never bound ``final_module``.
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 269
|
"""simple docstring"""
from __future__ import annotations

# NOTE(review): both constants below were mangled to the same name `__A`, so the
# second list clobbers the first (originally the demo input `arr` and the
# expected next-greatest-element output).
__A : List[Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__A : str = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase(__snake_case: list[float]) -> list[float]:
    """For each element, return the next strictly greater element to its right
    (-1 if none). Brute force, O(n^2).

    Bug fix: the mangled source clobbered every local into one name, iterated
    ``range(<the list>)`` and appended the whole input to an unbound ``result``.
    """
    result = []
    arr_size = len(__snake_case)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if __snake_case[i] < __snake_case[j]:
                next_element = __snake_case[j]
                break
        result.append(next_element)
    return result
def lowercase(__snake_case: list[float]) -> list[float]:
    """Same contract as the brute-force version, using enumerate + slicing.

    Bug fix: the mangled source read the undefined name ``arr`` and appended the
    whole input to an unbound ``result``.
    """
    result = []
    for i, outer in enumerate(__snake_case):
        next_element: float = -1
        for inner in __snake_case[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result
def lowercase(__snake_case: list[float]) -> list[float]:
    """Monotonic-stack next-greater-element, O(n).

    Walk right-to-left keeping a stack of candidates; pop every candidate <= the
    current value so the stack top (if any) is the next strictly greater element.
    Bug fix: the mangled source never bound ``stack``/``result`` (all assignments
    went to one throwaway name) and returned an undefined ``result``.
    """
    arr_size = len(__snake_case)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            # Discard candidates that are not strictly greater than arr[index].
            while stack[-1] <= __snake_case[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(__snake_case[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__A : int = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 33
| 0
|
def A_(boundary, steps):
    """Approximate an integral with the extended trapezoidal rule:
    int(f) ~= h/2 * (f(a) + 2*f(x_1) + ... + 2*f(x_{n-1}) + f(b)).

    Bug fix: the mangled source declared both parameters as ``A__`` (duplicate
    argument, a SyntaxError) and read unbound locals.
    NOTE(review): ``make_points`` and ``f`` are module-level helpers (also
    mangled to ``A_`` in this file).
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]  # lower bound of integration
    b = boundary[1]  # upper bound of integration
    x_i = make_points(a, b, h)  # interior sample points
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def A_(a, b, h):
    """Yield interior sample points a+h, a+2h, ... strictly below b-h.

    Bug fix: the mangled source declared three parameters all named ``A__``
    (duplicate argument, a SyntaxError) and read unbound locals.
    """
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def A_(x):  # enter your function here
    """Integrand: f(x) = x^2 (written as (x-0)*(x-0) as in the original).

    Bug fix: the mangled source read the unbound names ``x``/``y``.
    """
    y = (x - 0) * (x - 0)
    return y
def A_ ( ) -> List[Any]:
    """Demo driver: integrate f(x)=x^2 over [0, 1] with 10 trapezoid steps and print y.

    NOTE(review): mangled — `method_a` and `main` are undefined in this module
    (every function was renamed to `A_`), and each local below rebinds the same
    name `a__`, so `a`, `b`, `boundary` and `y` are never bound; this driver
    raises NameError as written.
    """
    a__ : int = 0.0  # Lower bound of integration
    a__ : List[Any] = 1.0  # Upper bound of integration
    a__ : List[Any] = 10.0  # define number of steps or resolution
    a__ : List[Any] = [a, b]  # define boundary of integration
    a__ : Optional[Any] = method_a(__snake_case , __snake_case )
    print(F'y = {y}' )


if __name__ == "__main__":
    main()
| 99
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {}
class A__(PretrainedConfig):
    """Configuration class for LLaMA models.

    Reconstructed from mangled source: the original ``__init__`` declared every
    parameter as ``_UpperCAmelCase`` (duplicate arguments, a SyntaxError), both
    class attributes were bound to the same name, the base class referenced the
    undefined ``_A`` (the import above provides ``PretrainedConfig``), and the
    validation method's def name did not match the ``self._rope_scaling_validation()``
    call site.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=40_96,
        intermediate_size=1_10_08,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=20_48,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: {'type': 'linear'|'dynamic', 'factor': float > 1}."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}""")
| 325
|
"""simple docstring"""
def lowercase(__snake_case: int) -> int:
    """Return the sum of the proper divisors of a positive integer.

    Raises:
        ValueError: if the input is not an int, or is not positive.

    Bug fix: the mangled source checked ``isinstance(x, x)`` (value used as its
    own type), which raises TypeError for any input.
    """
    if not isinstance(__snake_case, int):
        raise ValueError('Input must be an integer')
    if __snake_case <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, __snake_case // 2 + 1) if __snake_case % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33
| 0
|
"""simple docstring"""
import numpy as np
from PIL import Image
def UpperCAmelCase__(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """2-D max pooling over a square matrix.

    Slides a ``size`` x ``size`` window with step ``stride`` and keeps the maximum
    of each window. Returns a float ndarray of shape ((n-size)//stride + 1,)^2.

    Bug fix: the mangled source declared all three parameters with the same name
    (duplicate argument, a SyntaxError) and clobbered the row/column counters and
    the output-shape local into one name.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("""The input array is not a square matrix""")
    i = 0       # input row index
    j = 0       # input column index
    mat_i = 0   # output row index
    mat_j = 0   # output column index

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column indices to 0
        j = 0
        mat_j = 0
    return updated_arr
def UpperCAmelCase__(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """2-D average pooling over a square matrix.

    Same sliding-window scheme as the max-pooling variant, but each output cell
    is the *truncated* integer average of its window (int() of np.average, as in
    the original).

    Bug fix: the mangled source declared all three parameters with the same name
    (duplicate argument, a SyntaxError) and clobbered the counters/output shape.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("""The input array is not a square matrix""")
    i = 0       # input row index
    j = 0       # input column index
    mat_i = 0   # output row index
    mat_j = 0   # output column index

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the (truncated) average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column indices to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    # NOTE(review): `maxpooling`/`avgpooling` are undefined (both functions above
    # were mangled to `UpperCAmelCase__`), `image` is never the name bound by the
    # assignment below, and "path_to_image" is a placeholder — this demo block
    # cannot run as written.
    testmod(name="""avgpooling""", verbose=True)
    # Loading the image
    __lowerCAmelCase : List[Any] =Image.open("""path_to_image""")
    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 197
|
"""simple docstring"""
def lowercase(__snake_case):
    """Count inversions (pairs i < j with arr[i] > arr[j]) by brute force, O(n^2).

    Bug fix: the mangled source clobbered both locals into one name, so ``n`` and
    ``num_inversions`` were undefined at their use sites.
    """
    num_inversions = 0
    n = len(__snake_case)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if __snake_case[i] > __snake_case[j]:
                num_inversions += 1
    return num_inversions
def lowercase(__snake_case):
    """Count inversions by divide and conquer; returns (sorted_copy, count).

    Bug fix: the mangled source recursed via the undefined name
    ``count_inversions_recursive`` and passed the whole array (instead of each
    half) to the recursive calls; recursion now goes through this function.
    NOTE(review): merging relies on the module-level helper
    ``_count_cross_inversions`` (its def was also mangled to ``lowercase``).
    """
    if len(__snake_case) <= 1:
        return __snake_case, 0
    mid = len(__snake_case) // 2
    p = __snake_case[0:mid]
    q = __snake_case[mid:]
    a, inversion_p = lowercase(p)   # recurse on the left half
    b, inversions_q = lowercase(q)  # recurse on the right half
    c, cross_inversions = _count_cross_inversions(a, b)
    return c, inversion_p + inversions_q + cross_inversions
def lowercase(p, q):
    """Merge two sorted lists, counting inversions that cross the halves.

    Returns (merged_sorted_list, cross_inversion_count).
    Bug fix: the mangled source declared both parameters as ``__snake_case``
    (duplicate argument, a SyntaxError) and never bound ``i``/``j``/``num_inversion``.
    """
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def lowercase ( ):
    """Self-test driver for the inversion counters.

    NOTE(review): mangled — `count_inversions_bf`, `count_inversions_recursive`,
    `arr_a` and `main` are undefined in this module (the functions above are each
    named `lowercase`, the locals all rebind `lowercase_`), so this driver raises
    NameError as written.
    """
    lowercase_ : Union[str, Any] = [1_0, 2, 1, 5, 5, 2, 1_1]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    lowercase_ : int = count_inversions_bf(__snake_case )
    lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''' , __snake_case )
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    lowercase_ : Dict = count_inversions_bf(__snake_case )
    lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , __snake_case )
    # an empty list should also have zero inversions
    lowercase_ : List[Any] = []
    lowercase_ : Any = count_inversions_bf(__snake_case )
    lowercase_ , lowercase_ : List[str] = count_inversions_recursive(__snake_case )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , __snake_case )


if __name__ == "__main__":
    main()
| 33
| 0
|
'''simple docstring'''
import re
from filelock import FileLock
# Optional nltk dependency, used to re-split generated text into sentences.
# NOTE(review): mangled — the availability flag is bound to `lowerCAmelCase__`
# but tested as `NLTK_AVAILABLE` below, so this guard raises NameError as written.
try:
    import nltk

    lowerCAmelCase__ = True
except (ImportError, ModuleNotFoundError):
    lowerCAmelCase__ = False

if NLTK_AVAILABLE:
    # Serialise the one-time punkt download across concurrent workers.
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)
def _A ( A__ ):
    """Re-split *A__* onto one sentence per line (Pegasus / ROUGE-Lsum convention).

    Bug fix: the result of ``re.sub`` was previously discarded, so the pegasus
    ``<n>`` newline tokens were never actually removed; it is now assigned back.
    """
    A__ = re.sub('''<n>''' , '''''' , A__ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(A__ ) )
| 104
|
"""simple docstring"""
# Pinned dependency table (package name -> pip requirement specifier), consumed by
# the repo's setup/deps tooling.
# NOTE(review): mangled — the table is bound to `__A`; downstream code that
# expects a name like `deps` will not find it.
__A : Any = {
    '''Pillow''': '''Pillow''',
    '''accelerate''': '''accelerate>=0.11.0''',
    '''compel''': '''compel==0.1.8''',
    '''black''': '''black~=23.1''',
    '''datasets''': '''datasets''',
    '''filelock''': '''filelock''',
    '''flax''': '''flax>=0.4.1''',
    '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
    '''huggingface-hub''': '''huggingface-hub>=0.13.2''',
    '''requests-mock''': '''requests-mock==1.10.0''',
    '''importlib_metadata''': '''importlib_metadata''',
    '''invisible-watermark''': '''invisible-watermark''',
    '''isort''': '''isort>=5.5.4''',
    '''jax''': '''jax>=0.2.8,!=0.3.2''',
    '''jaxlib''': '''jaxlib>=0.1.65''',
    '''Jinja2''': '''Jinja2''',
    '''k-diffusion''': '''k-diffusion>=0.0.12''',
    '''torchsde''': '''torchsde''',
    '''note_seq''': '''note_seq''',
    '''librosa''': '''librosa''',
    '''numpy''': '''numpy''',
    '''omegaconf''': '''omegaconf''',
    '''parameterized''': '''parameterized''',
    '''protobuf''': '''protobuf>=3.20.3,<4''',
    '''pytest''': '''pytest''',
    '''pytest-timeout''': '''pytest-timeout''',
    '''pytest-xdist''': '''pytest-xdist''',
    '''ruff''': '''ruff>=0.0.241''',
    '''safetensors''': '''safetensors''',
    '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
    '''scipy''': '''scipy''',
    '''onnx''': '''onnx''',
    '''regex''': '''regex!=2019.12.17''',
    '''requests''': '''requests''',
    '''tensorboard''': '''tensorboard''',
    '''torch''': '''torch>=1.4''',
    '''torchvision''': '''torchvision''',
    '''transformers''': '''transformers>=4.25.1''',
    '''urllib3''': '''urllib3<=2.0.0''',
}
| 33
| 0
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class snake_case__ (_A , _A ):
    """Adapter exposing a ``timm`` model as a transformers backbone (per-stage
    feature maps via BackboneOutput).

    NOTE(review): heavily mangled — the base list repeats the undefined name `_A`
    twice (duplicate bases raise TypeError at class creation), the three class
    attributes below all rebind `SCREAMING_SNAKE_CASE_` (so only the last
    survives), and every local in the methods is bound to `a`, leaving later
    reads of `pretrained`, `feature_maps`, `hidden_states` and `output`
    unresolved. Documented as-is; the code cannot run until those names are
    restored.
    """

    SCREAMING_SNAKE_CASE_ : Optional[Any] = "pixel_values"
    SCREAMING_SNAKE_CASE_ : Optional[Any] = False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = TimmBackboneConfig

    def __init__( self : str , __lowerCamelCase : Any , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
        # Fail fast when timm is not installed.
        requires_backends(self , "timm" )
        super().__init__(__lowerCamelCase )
        a = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )
        if hasattr(__lowerCamelCase , "out_features" ) and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
        a = getattr(__lowerCamelCase , "use_pretrained_backbone" , __lowerCamelCase )
        # NOTE(review): `pretrained` below is never bound (mangled to `a` above).
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
        # We just take the final layer by default. This matches the default for the transformers models.
        a = config.out_indices if getattr(__lowerCamelCase , "out_indices" , __lowerCamelCase ) is not None else (-1,)
        a = timm.create_model(
            config.backbone , pretrained=__lowerCamelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=__lowerCamelCase , **__lowerCamelCase , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        a = self._backbone.return_layers
        a = {layer['''module''']: str(__lowerCamelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(__lowerCamelCase )

    @classmethod
    def __UpperCAmelCase ( cls : Dict , __lowerCamelCase : Union[str, Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : str ) -> Optional[Any]:
        """Build a TimmBackboneConfig out of from_pretrained-style kwargs and defer to _from_config."""
        requires_backends(cls , ["vision", "timm"] )
        from ...models.timm_backbone import TimmBackboneConfig

        a = kwargs.pop("config" , TimmBackboneConfig() )
        a = kwargs.pop("use_timm_backbone" , __lowerCamelCase )
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones" )
        a = kwargs.pop("num_channels" , config.num_channels )
        a = kwargs.pop("features_only" , config.features_only )
        a = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
        a = kwargs.pop("out_indices" , config.out_indices )
        a = TimmBackboneConfig(
            backbone=__lowerCamelCase , num_channels=__lowerCamelCase , features_only=__lowerCamelCase , use_pretrained_backbone=__lowerCamelCase , out_indices=__lowerCamelCase , )
        return super()._from_config(__lowerCamelCase , **__lowerCamelCase )

    def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int ) -> int:
        # Intentional no-op: timm backbones manage their own weight init.
        pass

    def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Union[str, Any] ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        """Forward pass returning BackboneOutput (or a tuple when return_dict is False)."""
        a = return_dict if return_dict is not None else self.config.use_return_dict
        a = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        a = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            a = self._all_layers
            a = self._backbone(__lowerCamelCase , **__lowerCamelCase )
            a = self._return_layers
            a = tuple(hidden_states[i] for i in self.out_indices )
        else:
            a = self._backbone(__lowerCamelCase , **__lowerCamelCase )
            a = None
            a = tuple(__lowerCamelCase )
            a = tuple(__lowerCamelCase ) if hidden_states is not None else None
        if not return_dict:
            a = (feature_maps,)
            if output_hidden_states:
                a = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=__lowerCamelCase , hidden_states=__lowerCamelCase , attentions=__lowerCamelCase )
| 107
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import table for the Mega model: light config symbols are always listed,
# torch-dependent modeling symbols only when torch is installed.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33
| 0
|
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse a whitespace-separated edge list file into an adjacency map.

    Each line must be ``node_a node_b distance``. Returns a dict mapping every
    node to a list of ``[neighbour, distance]`` pairs (distances kept as strings,
    matching how the rest of the tabu-search code consumes them).
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            node_a, node_b, cost = line.split()[:3]
            # Record the edge in both directions (undirected graph).
            dict_of_neighbours.setdefault(node_a, []).append([node_b, cost])
            dict_of_neighbours.setdefault(node_b, []).append([node_a, cost])
    return dict_of_neighbours
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
with open(__snake_case ) as f:
A_ = f.read(1 )
A_ = start_node
A_ = []
A_ = start_node
A_ = 0
while visiting not in first_solution:
A_ = 1_00_00
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__snake_case ) and k[0] not in first_solution:
A_ = k[1]
A_ = k[0]
first_solution.append(__snake_case )
A_ = distance_of_first_solution + int(__snake_case )
A_ = best_node
first_solution.append(__snake_case )
A_ = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
A_ = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_00_00
)
return first_solution, distance_of_first_solution
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]:
A_ = []
for n in solution[1:-1]:
A_ = solution.index(__snake_case )
for kn in solution[1:-1]:
A_ = solution.index(__snake_case )
if n == kn:
continue
A_ = copy.deepcopy(__snake_case )
A_ = kn
A_ = n
A_ = 0
for k in _tmp[:-1]:
A_ = _tmp[_tmp.index(__snake_case ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
A_ = distance + int(i[1] )
_tmp.append(__snake_case )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
A_ = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda UpperCAmelCase__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]:
A_ = 1
A_ = first_solution
A_ = []
A_ = distance_of_first_solution
A_ = solution
while count <= iters:
A_ = find_neighborhood(__snake_case, __snake_case )
A_ = 0
A_ = neighborhood[index_of_best_solution]
A_ = len(__snake_case ) - 1
A_ = False
while not found:
A_ = 0
while i < len(__snake_case ):
if best_solution[i] != solution[i]:
A_ = best_solution[i]
A_ = solution[i]
break
A_ = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
A_ = True
A_ = best_solution[:-1]
A_ = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
A_ = cost
A_ = solution
else:
A_ = index_of_best_solution + 1
A_ = neighborhood[index_of_best_solution]
if len(__snake_case ) >= size:
tabu_list.pop(0 )
A_ = count + 1
return best_solution_ever, best_cost
def main(args=None):
    """Wire the pipeline together: parse the data file, build a greedy tour, refine it."""
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    # Command-line entry point: -f data file, -i iterations, -s tabu-list size.
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
| 162
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
# Where the examples live and which regex/replacement pair updates the version
# string in each kind of file.
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files (outside examples/) that carry the canonical version string.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in ``fname`` using the regex registered for ``pattern``."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the pinned minimum version in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere; examples are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point model-doc links in the README's model list at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read and parse the current version from the package ``__init__``."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the release preparation: compute the next version, confirm with the user,
    then update it everywhere (plus README cleanup for non-patch releases)."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """After a release: bump to the next dev version and clean the README."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 33
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the knobs used to build GLPN image-processor configs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        # Store everything on the instance so prepare_image_processor_dict and the
        # test class can read it back.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate a GLPNImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for GLPNImageProcessor on PIL, numpy and torch inputs."""

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 291
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the resolve URL for ``path`` inside a dataset repo on the Hub.

    Works around huggingface_hub < 0.11.0, which did not url-encode the file path.
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
| 33
| 0
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _lowercase(ProcessorMixin):
    """
    Joint processor that wraps an Encodec feature extractor (audio) and a T5
    tokenizer (text) behind a single ``__call__``/``batch_decode`` interface.
    """

    # Names ProcessorMixin uses to build the two sub-processors.
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Encode ``audio`` with the feature extractor and/or ``text`` with the tokenizer.

        The first positional argument, if any, is treated as the audio input.
        Raises ValueError when neither audio nor text is given.
        """
        # For backwards compatibility with the target-processor context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge audio features (and padding mask, when present) into the text encoding.
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode audio batches via ``_decode_audio`` or defer to the tokenizer."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        """Strip padding from each generated waveform using ``padding_mask``."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
| 227
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds ResNet configs/inputs and runs shape checks for the test class below."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as
    ResNet does not use input_ids/inputs_embeds or attention masks.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """End-to-end check against a pretrained checkpoint with known logits."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 33
| 0
|
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """A binary-tree node holding a float payload and optional children."""

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(root: TreeNode | None) -> bool:
    """Return True if the tree rooted at ``root`` is a valid binary search tree.

    Raises:
        ValueError: if any node is not a ``TreeNode`` or its data is not
            convertible to float.
    """

    def is_valid_tree(node: TreeNode | None) -> bool:
        # Structural validation: every node is a TreeNode with float-like data.
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        # Each node must lie strictly between the bounds inherited from its ancestors.
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 320
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Reference passage fed to the question-answering tool in every test below.
# (Module-level name restored: a dunder name cannot be referenced from inside a
# class body without name mangling.)
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    """Exercises the text-question-answering tool, both local and remote."""

    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
# Module-wide logger (garbled identifier; presumably originally `logger` — TODO confirm).
UpperCAmelCase = logging.get_logger(__name__)
# Constant names restored to those the tokenizer class below actually references.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        # Fixed broken path segment "aresolve" -> "resolve".
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        # Fixed typo "tokenizer.jsont" -> "tokenizer.json".
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        # Fixed broken path segment "aresolve" -> "resolve".
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class lowerCAmelCase ( _A ):
    """Fast (Rust-backed) tokenizer wrapping the WordPiece `RealmTokenizer`.

    NOTE(review): this class is name-mangled. Every method signature repeats
    the parameter name ``__lowercase``, which is a SyntaxError as written; all
    local assignments bind ``__lowercase`` while the bodies read upstream names
    (``normalizer_state``, ``do_lower_case``, ``batch_text_pair``,
    ``output_data``, ``token_ids_a``, ``sep``/``cls``, ...) that are never
    bound here. All four ``snake_case`` methods also share one name, so only
    the last definition would survive. TODO: restore the upstream names
    (RealmTokenizerFast) before use.
    """

    lowerCAmelCase_ = VOCAB_FILES_NAMES
    lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase_ = PRETRAINED_INIT_CONFIGURATION
    lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase_ = RealmTokenizer
    # NOTE(review): the five class attributes above all bind the same name
    # `lowerCAmelCase_`, so only the slow-tokenizer-class assignment survives.

    def __init__( self : str , __lowercase : Optional[int]=None , __lowercase : int=None , __lowercase : Union[str, Any]=True , __lowercase : str="[UNK]" , __lowercase : Dict="[SEP]" , __lowercase : Dict="[PAD]" , __lowercase : List[str]="[CLS]" , __lowercase : List[str]="[MASK]" , __lowercase : Dict=True , __lowercase : Union[str, Any]=None , **__lowercase : int , ):
        """Build the fast tokenizer and re-sync the Rust normalizer options
        (lowercase / strip_accents / handle_chinese_chars) with the kwargs."""
        super().__init__(
            __lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
        __lowercase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        # NOTE(review): the normalizer state above is bound to `__lowercase`,
        # but the checks below read `normalizer_state` — unbound as written.
        if (
            normalizer_state.get('lowercase' , __lowercase ) != do_lower_case
            or normalizer_state.get('strip_accents' , __lowercase ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , __lowercase ) != tokenize_chinese_chars
        ):
            __lowercase =getattr(__lowercase , normalizer_state.pop('type' ) )
            __lowercase =do_lower_case
            __lowercase =strip_accents
            __lowercase =tokenize_chinese_chars
            __lowercase =normalizer_class(**__lowercase )
            __lowercase =do_lower_case

    def snake_case ( self : Union[str, Any] , __lowercase : int , **__lowercase : Dict ):
        """Encode a batch of candidate texts (REALM candidate scoring), padding
        each candidate to max length and collecting ids/masks/type-ids."""
        __lowercase =PaddingStrategy.MAX_LENGTH
        __lowercase =text
        __lowercase =kwargs.pop('text_pair' , __lowercase )
        __lowercase =kwargs.pop('return_tensors' , __lowercase )
        # Accumulators for the per-candidate encodings.
        __lowercase ={
            '''input_ids''': [],
            '''attention_mask''': [],
            '''token_type_ids''': [],
        }
        for idx, candidate_text in enumerate(__lowercase ):
            if batch_text_pair is not None:
                __lowercase =batch_text_pair[idx]
            else:
                __lowercase =None
            __lowercase =super().__call__(__lowercase , __lowercase , return_tensors=__lowercase , **__lowercase )
            __lowercase =encoded_candidates.get('input_ids' )
            __lowercase =encoded_candidates.get('attention_mask' )
            __lowercase =encoded_candidates.get('token_type_ids' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(__lowercase )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(__lowercase )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(__lowercase )
        # Drop keys that collected nothing, then wrap as a BatchEncoding.
        __lowercase ={key: item for key, item in output_data.items() if len(__lowercase ) != 0}
        return BatchEncoding(__lowercase , tensor_type=__lowercase )

    def snake_case ( self : Any , __lowercase : Union[str, Any] , __lowercase : Optional[int]=None ):
        """Build model inputs with special tokens: [CLS] A [SEP] (B [SEP])."""
        __lowercase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def snake_case ( self : List[str] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
        """Create token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        __lowercase =[self.sep_token_id]
        __lowercase =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def snake_case ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ):
        """Save the tokenizer model files to a directory; return the file paths."""
        __lowercase =self._tokenizer.model.save(__lowercase , name=__lowercase )
        return tuple(__lowercase )
| 141
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _UpperCAmelCase ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    """Formatter that converts Arrow rows/columns/batches into PyTorch tensors.

    Fixes over the previous revision: the duplicated ``A`` parameter in
    ``__init__`` (a SyntaxError), the always-true ``type(A)`` isinstance check
    (upstream uses ``type(None)``), the nonexistent ``torch.intaa`` /
    ``torch.floataa`` dtypes (restored to ``torch.int64`` / ``torch.float32``),
    the recursion that re-tensorized the whole struct instead of each
    ``substruct``, and the mangled single-letter method names that shadowed one
    another (restored to the names the bodies themselves call).
    """

    def __init__( self , features=None , **torch_tensor_kwargs ):
        super().__init__(features=features )
        # Extra kwargs forwarded verbatim to torch.tensor (e.g. device, dtype).
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate( self , column ):
        """Stack a non-empty list of same-shape/same-dtype tensors into one tensor."""
        import torch

        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column

    def _tensorize( self , value ):
        """Convert a single leaf value to a tensor; str/bytes/None pass through."""
        import torch

        if isinstance(value , (str, bytes, type(None)) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()

        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {'''dtype''': torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'''dtype''': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )

    def _recursive_tensorize( self , data_struct ):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct , '''__array__''' ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )

    def recursive_tensorize( self , data_struct ):
        """Map `_recursive_tensorize` over an arbitrarily nested structure."""
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )

    def format_row( self , pa_table ):
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )

    def format_column( self , pa_table ):
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column

    def format_batch( self , pa_table ):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
| 33
| 0
|
"""simple docstring"""
def _lowercase ( price: float , tax_rate: float ) -> float:
    """Return `price` with `tax_rate` applied (e.g. tax_rate=0.25 adds 25%).

    The previous signature declared the same parameter name twice (a
    SyntaxError) and annotated the return as `Optional[int]` without importing
    `Optional`; both are fixed here.
    """
    return price * (1 + tax_rate)


# Export the underscore-prefixed helper explicitly (upstream name: price_plus_tax).
__all__ = ["_lowercase"]

if __name__ == "__main__":
    # The demo lines called the undefined upstream name `price_plus_tax`;
    # call the local `_lowercase` instead.
    print(f"""{_lowercase(100, 0.25) = }""")
    print(f"""{_lowercase(125.50, 0.05) = }""")
| 269
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def A_ ( A__ ) -> str:
    """Convert a TensorFlow GPTSAN checkpoint into a PyTorch state dict and save it.

    NOTE(review): the body is name-mangled. The parameter ``A__`` is never
    used (the function reads the module-level ``args``), every local
    assignment binds the throwaway name ``a__``, and the code then reads names
    that are never bound here (``params``, ``parameter_file``, ``reader``,
    ``shapes``, ``__snake_case``, ``vnp``, ``player``, ``nlayer``, ``state`` /
    ``state_q`` / ``state_k`` / ``state_v``).  As written this raises
    NameError on first use; the upstream variable names must be restored
    before this converter can run.
    """
    a__ : int = os.path.join(args.tf_model_dir , 'parameters.json' )
    a__ : Any = json.loads(open(__snake_case ).read() )
    if not params:
        raise ValueError(
            F'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
    if not args.output.endswith('.pt' ):
        a__ : Dict = args.output + '''.pt'''
    a__ : Any = OrderedDict()
    with tf.device('/CPU:0' ):
        a__ : int = tf.train.load_checkpoint(args.tf_model_dir )
        a__ : Optional[int] = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            a__ : int = reader.get_tensor(__snake_case ).astype(np.floataa )
            # Skip Adam optimizer slots; only model weights are converted.
            if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
                continue
            if key_name.startswith('pasts/' ):
                if key_name.startswith('pasts/mlp' ):
                    a__ : Optional[Any] = int(key_name[9] )
                elif key_name.startswith('pasts/out' ):
                    a__ : Tuple = 8
                a__ : str = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
                a__ : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                a__ : List[Any] = torch.tensor(__snake_case )
            elif key_name.startswith('model/moe' ):
                a__ : Optional[int] = int(key_name[9:].split('/' )[0] )
                if key_name.endswith('/switch_gating/kernel' ):
                    a__ : List[str] = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
                    a__ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    a__ : Tuple = torch.tensor(__snake_case )
                elif key_name.endswith('/softmlp/kernel' ):
                    a__ : List[str] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
                    a__ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    a__ : List[str] = torch.tensor(__snake_case )
                elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
                    a__ : Union[str, Any] = key_name[-9:-7]
                    for i in range(16 ):
                        a__ : Tuple = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
                        a__ : str = (
                            vnp[i].transpose([1, 0] ).copy()
                        ) # In Mesh-Tensorflow, it is one array, so it is divided
                        a__ : Tuple = torch.tensor(__snake_case )
            elif key_name.startswith('model/mlp' ):
                a__ : Optional[int] = int(key_name[9:].split('/' )[0] )
                if key_name.endswith('/p1/kernel' ):
                    a__ : List[Any] = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
                    a__ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    a__ : Dict = torch.tensor(__snake_case )
                elif key_name.endswith('/p1/bias' ):
                    a__ : Any = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
                    a__ : Any = vnp.copy() # same because it is one dimensional
                    a__ : int = torch.tensor(__snake_case )
                elif key_name.endswith('/p2/kernel' ):
                    a__ : Optional[Any] = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
                    a__ : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                    a__ : int = torch.tensor(__snake_case )
                elif key_name.endswith('/p2/bias' ):
                    a__ : Optional[int] = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
                    a__ : List[str] = vnp.copy() # same because it is one dimensional
                    a__ : int = torch.tensor(__snake_case )
            elif key_name.startswith('model/ln' ):
                a__ : Optional[Any] = int(key_name[8:].split('/' )[0] )
                if key_name.endswith('/b' ):
                    a__ : str = '''model.blocks.%d.feed_forward.norm.bias''' % player
                    a__ : Any = vnp.copy() # same because it is one dimensional
                    a__ : int = torch.tensor(__snake_case )
                elif key_name.endswith('/g' ):
                    a__ : Union[str, Any] = '''model.blocks.%d.feed_forward.norm.weight''' % player
                    a__ : Union[str, Any] = vnp.copy() # same because it is one dimensional
                    a__ : int = torch.tensor(__snake_case )
            elif key_name.startswith('model/att' ):
                a__ : Optional[int] = int(key_name[9:].split('/' )[0] )
                if key_name.endswith('/qkv/kernel' ):
                    a__ : Dict = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
                    a__ : Tuple = state[:, 0, :, :]
                    a__ : Dict = state[:, 1, :, :]
                    a__ : Union[str, Any] = state[:, 2, :, :]
                    a__ : int = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    a__ : Optional[Any] = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    a__ : Union[str, Any] = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    a__ : List[str] = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
                    a__ : str = torch.tensor(__snake_case )
                    a__ : str = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
                    a__ : Any = torch.tensor(__snake_case )
                    a__ : List[Any] = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
                    a__ : Any = torch.tensor(__snake_case )
                elif key_name.endswith('/o/kernel' ):
                    a__ : Dict = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
                    a__ : Optional[int] = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    ) # Mesh-Tensorflow is a diagonal matrix
                    a__ : Any = torch.tensor(__snake_case )
            elif key_name.startswith('model/an' ):
                a__ : str = int(key_name[8:].split('/' )[0] )
                if key_name.endswith('/b' ):
                    a__ : Dict = '''model.blocks.%d.self_attn.norm.bias''' % player
                    a__ : Union[str, Any] = vnp.copy() # same because it is one dimensional
                    a__ : str = torch.tensor(__snake_case )
                elif key_name.endswith('/g' ):
                    a__ : str = '''model.blocks.%d.self_attn.norm.weight''' % player
                    a__ : Optional[Any] = vnp.copy() # same because it is one dimensional
                    a__ : Any = torch.tensor(__snake_case )
            elif (
                key_name.startswith('model/wte' )
                or key_name.startswith('model/wpe' )
                or key_name.startswith('model/ete' )
            ):
                a__ : int = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
                    key_name[-3:]
                ]
                a__ : str = '''model.%s.weight''' % nlayer
                a__ : int = vnp.copy() # same in embedded
                a__ : int = torch.tensor(__snake_case )
                if key_name.startswith('model/wte' ):
                    a__ : Dict = '''lm_head.weight'''
                    a__ : Tuple = vnp.copy() # same in embedded
                    a__ : Dict = torch.tensor(__snake_case )
            elif key_name.startswith('model/wob' ):
                a__ : int = '''final_logits_bias'''
                a__ : Any = vnp.copy() # same in embedded
                a__ : Optional[Any] = state.reshape((1, -1) )
                a__ : Any = torch.tensor(__snake_case )
            elif key_name == "model/dense/kernel":
                a__ : List[str] = '''model.last_project.weight'''
                a__ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
                a__ : Optional[Any] = torch.tensor(__snake_case )
            elif key_name == "model/dense_1/bias":
                a__ : Dict = '''model.last_project.bias'''
                a__ : Tuple = vnp.copy() # same because it is one dimensional
                a__ : Any = torch.tensor(__snake_case )
    torch.save(__snake_case , args.output )
if __name__ == "__main__":
    # CLI entry point: parse --tf_model_dir / --output and run the converter.
    # The previous revision bound the parser and parsed args to a throwaway
    # name `lowercase` and then called the undefined names `parser`, `args`
    # and `convert_tf_gptsan_to_pt`; the converter defined above is `A_` and
    # it reads the module-level `args` bound here.
    parser = argparse.ArgumentParser(
        description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
    parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    args = parser.parse_args()
    A_(args)
| 99
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__A : Union[str, Any] = logging.get_logger(__name__)

# General docstring
__A : Tuple = '''MobileNetV1Config'''

# Base docstring
__A : Union[str, Any] = '''google/mobilenet_v1_1.0_224'''
__A : Union[str, Any] = [1, 1_024, 7, 7]

# Image classification docstring
__A : Optional[Any] = '''google/mobilenet_v1_1.0_224'''
__A : List[Any] = '''tabby, tabby cat'''

# NOTE(review): every constant above binds the same name ``__A`` (each
# rebinding the previous), yet the decorators below read _CHECKPOINT_FOR_DOC,
# _CONFIG_FOR_DOC, _EXPECTED_OUTPUT_SHAPE, etc., which are never defined —
# a name-mangling artifact to restore before use.
__A : Union[str, Any] = [
    '''google/mobilenet_v1_1.0_224''',
    '''google/mobilenet_v1_0.75_192''',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowercase ( __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Dict=None ):
    """Build the TF-variable-name -> PyTorch-parameter mapping for MobileNetV1.

    NOTE(review): mangled — the signature declares ``__snake_case`` three
    times (a SyntaxError as written), every assignment binds the throwaway
    name ``lowercase_``, and the body reads ``model``, ``backbone``,
    ``pt_index``, ``tf_index``, ``pointer`` and ``tf_to_pt_map``, none of
    which are ever bound.  Upstream name: ``_build_tf_to_pytorch_map``.
    """
    lowercase_ : str = {}
    if isinstance(__snake_case , __snake_case ):
        lowercase_ : Union[str, Any] = model.mobilenet_va
    else:
        lowercase_ : Optional[Any] = model
    # Stem convolution + batch-norm statistics.
    lowercase_ : Union[str, Any] = '''MobilenetV1/Conv2d_0/'''
    lowercase_ : Union[str, Any] = backbone.conv_stem.convolution.weight
    lowercase_ : Optional[Any] = backbone.conv_stem.normalization.bias
    lowercase_ : Union[str, Any] = backbone.conv_stem.normalization.weight
    lowercase_ : Any = backbone.conv_stem.normalization.running_mean
    lowercase_ : int = backbone.conv_stem.normalization.running_var
    # 13 depthwise/pointwise pairs: PyTorch layer 2i maps to TF Conv2d_{i+1}.
    for i in range(1_3 ):
        lowercase_ : Optional[int] = i + 1
        lowercase_ : Union[str, Any] = i * 2
        lowercase_ : Optional[Any] = backbone.layer[pt_index]
        lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        lowercase_ : str = pointer.convolution.weight
        lowercase_ : int = pointer.normalization.bias
        lowercase_ : Any = pointer.normalization.weight
        lowercase_ : Dict = pointer.normalization.running_mean
        lowercase_ : Union[str, Any] = pointer.normalization.running_var
        lowercase_ : Any = backbone.layer[pt_index + 1]
        lowercase_ : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        lowercase_ : int = pointer.convolution.weight
        lowercase_ : str = pointer.normalization.bias
        lowercase_ : Tuple = pointer.normalization.weight
        lowercase_ : Dict = pointer.normalization.running_mean
        lowercase_ : Any = pointer.normalization.running_var
    if isinstance(__snake_case , __snake_case ):
        # Classifier head (only for the classification model).
        lowercase_ : Optional[Any] = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
        lowercase_ : Any = model.classifier.weight
        lowercase_ : Optional[int] = model.classifier.bias
    return tf_to_pt_map
def lowercase ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Dict ):
    """Load TensorFlow checkpoint weights into a PyTorch MobileNetV1 model.

    NOTE(review): mangled — ``__snake_case`` is declared three times in the
    signature (a SyntaxError as written) and the body reads ``init_vars``,
    ``array``, ``tf_weights`` and ``tf_to_pt_map``, which are never bound
    (all assignments target ``lowercase_``); ``_build_tf_to_pytorch_map`` is
    also not defined in this module as spelled.  Upstream name:
    ``load_tf_weights_in_mobilenet_v1``.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.''' )
        raise

    # Load weights from TF model
    lowercase_ : Tuple = tf.train.list_variables(__snake_case )
    lowercase_ : int = {}
    for name, shape in init_vars:
        logger.info(F'''Loading TF weight {name} with shape {shape}''' )
        lowercase_ : Optional[Any] = tf.train.load_variable(__snake_case , __snake_case )
        lowercase_ : Optional[int] = array
    # Build TF to PyTorch weights loading map
    lowercase_ : Any = _build_tf_to_pytorch_map(__snake_case , __snake_case , __snake_case )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F'''Importing {name}''' )
        if name not in tf_weights:
            logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
            continue
        lowercase_ : Union[str, Any] = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''' )
            lowercase_ : Any = np.transpose(__snake_case , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('''Transposing''' )
            if len(pointer.shape ) == 2: # copying into linear layer
                lowercase_ : Optional[int] = array.squeeze().transpose()
            else:
                lowercase_ : Optional[int] = np.transpose(__snake_case , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
        logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
        lowercase_ : str = torch.from_numpy(__snake_case )
        # Drop the consumed weight and its optimizer/EMA companions.
        tf_weights.pop(__snake_case , __snake_case )
        tf_weights.pop(name + '''/RMSProp''' , __snake_case )
        tf_weights.pop(name + '''/RMSProp_1''' , __snake_case )
        tf_weights.pop(name + '''/ExponentialMovingAverage''' , __snake_case )
    logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
    return model
def lowercase ( features , conv_layer ):
    """Pad `features` the way TensorFlow's "SAME" padding would before `conv_layer`.

    The previous signature declared the parameter name ``__snake_case`` twice
    (a SyntaxError) and the body read names that were never bound; the two
    arguments are restored here as `features` (an NCHW tensor) and
    `conv_layer` (a 2-d convolution exposing `.stride` / `.kernel_size`).
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    # TF "SAME": total pad = kernel - stride (or kernel - in%stride), floored at 0.
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )

    # Split each total asymmetrically: the extra pixel goes to the right/bottom.
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    # nn.functional.pad takes (left, right, top, bottom) for the last two dims.
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , '''constant''' , 0.0 )
class _UpperCAmelCase ( nn.Module ):
    """Conv + optional batch-norm + optional activation block for MobileNetV1.

    NOTE(review): mangled — ``__init__`` declares the parameter name ``A``
    eight times (a SyntaxError as written), its assignments bind the local
    ``lowercase_`` instead of instance attributes, and the bodies read
    ``config``, ``in_channels``, ``out_channels``, ``groups``,
    ``kernel_size``, ``use_normalization``, ``use_activation`` and
    ``features``, which are never bound.  The forward pass reads
    ``self.config`` / ``self.convolution`` / ``self.normalization`` /
    ``self.activation``, so the intended attribute names are recoverable.
    """

    def __init__( self : List[Any] , A : MobileNetVaConfig , A : int , A : int , A : int , A : Optional[int] = 1 , A : Optional[int] = 1 , A : bool = False , A : Optional[bool] = True , A : Optional[bool or str] = True , ) -> None:
        super().__init__()
        lowercase_ : int = config
        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
        # TF-style "SAME" padding is applied dynamically in forward() when
        # config.tf_padding is set, so the conv itself pads 0 in that case.
        lowercase_ : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        lowercase_ : int = nn.Convad(
            in_channels=A , out_channels=A , kernel_size=A , stride=A , padding=A , groups=A , bias=A , padding_mode='''zeros''' , )
        if use_normalization:
            lowercase_ : Optional[Any] = nn.BatchNormad(
                num_features=A , eps=config.layer_norm_eps , momentum=0.9997 , affine=A , track_running_stats=A , )
        else:
            lowercase_ : Union[str, Any] = None
        if use_activation:
            if isinstance(A , A ):
                lowercase_ : str = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , A ):
                lowercase_ : Any = ACTaFN[config.hidden_act]
            else:
                lowercase_ : Tuple = config.hidden_act
        else:
            lowercase_ : Tuple = None

    def A ( self : str , A : torch.Tensor ) -> torch.Tensor:
        """Apply (optional TF padding ->) convolution -> norm -> activation."""
        if self.config.tf_padding:
            lowercase_ : List[Any] = apply_tf_padding(A , self.convolution )
        lowercase_ : Optional[int] = self.convolution(A )
        if self.normalization is not None:
            lowercase_ : Union[str, Any] = self.normalization(A )
        if self.activation is not None:
            lowercase_ : Optional[int] = self.activation(A )
        return features
class _UpperCAmelCase ( _A ):
    """Base class wiring config and weight-loading for MobileNetV1 checkpoints.

    NOTE(review): all five class attributes bind the same name
    ``SCREAMING_SNAKE_CASE_`` (only the last assignment survives), the name
    ``load_tf_weights_in_mobilenet_va`` is not defined in this module as
    spelled, and the init method reads ``module`` while its parameter is
    ``A`` — name-mangling artifacts to restore before use.
    """

    SCREAMING_SNAKE_CASE_ : Optional[int] = MobileNetVaConfig
    SCREAMING_SNAKE_CASE_ : int = load_tf_weights_in_mobilenet_va
    SCREAMING_SNAKE_CASE_ : Optional[Any] = "mobilenet_v1"
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = "pixel_values"
    SCREAMING_SNAKE_CASE_ : List[str] = False

    def A ( self : Any , A : Union[nn.Linear, nn.Convad] ) -> None:
        """Initialize weights: normal(0, initializer_range) for linear/conv
        (zero bias), and zero-bias / unit-weight for batch norm."""
        if isinstance(A , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(A , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
__A : Union[str, Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__A : List[str] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , _A , )
class _UpperCAmelCase ( _A ):
    """Bare MobileNetV1 backbone: conv stem, 13 depthwise/pointwise pairs,
    optional adaptive-average pooler.

    NOTE(review): mangled — both ``__init__`` and the forward method repeat
    the parameter name ``A`` (a SyntaxError as written); assignments bind the
    local ``lowercase_`` while the bodies read ``config``, ``depth``,
    ``out_channels``, ``strides``, ``self.layer``, ``self.conv_stem``,
    ``self.pooler``, ``output_hidden_states``, ``pixel_values``,
    ``hidden_states`` etc., which are never bound as written.
    """

    def __init__( self : str , A : MobileNetVaConfig , A : bool = True ) -> int:
        super().__init__(A )
        lowercase_ : Union[str, Any] = config
        # Base depth of 32 channels, scaled by config.depth_multiplier.
        lowercase_ : List[str] = 32
        lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
        lowercase_ : Union[str, Any] = MobileNetVaConvLayer(
            A , in_channels=config.num_channels , out_channels=A , kernel_size=3 , stride=2 , )
        # Per-block strides for the 13 depthwise convolutions.
        lowercase_ : Optional[Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        lowercase_ : List[Any] = nn.ModuleList()
        for i in range(13 ):
            lowercase_ : Dict = out_channels
            if strides[i] == 2 or i == 0:
                # Channel count doubles at each stride-2 block (and the first).
                depth *= 2
                lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    A , in_channels=A , out_channels=A , kernel_size=3 , stride=strides[i] , groups=A , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    A , in_channels=A , out_channels=A , kernel_size=1 , ) )
        lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def A ( self : Any , A : Optional[Any] ) -> Optional[int]:
        # Head pruning is not supported for this architecture.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(A )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def A ( self : List[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        """Forward pass: stem -> 26 conv layers -> optional pooling."""
        lowercase_ : str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )
        lowercase_ : List[str] = self.conv_stem(A )
        lowercase_ : Dict = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            lowercase_ : Optional[int] = layer_module(A )
            if output_hidden_states:
                lowercase_ : str = all_hidden_states + (hidden_states,)
        lowercase_ : Tuple = hidden_states
        if self.pooler is not None:
            lowercase_ : Dict = torch.flatten(self.pooler(A ) , start_dim=1 )
        else:
            lowercase_ : Optional[Any] = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=A , pooler_output=A , hidden_states=A , )
@add_start_docstrings(
    "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _A , )
class _UpperCAmelCase ( _A ):
    """MobileNetV1 with a linear image-classification head on the pooled output.

    NOTE(review): mangled — the forward method repeats the parameter name
    ``A`` four times (a SyntaxError as written); assignments bind the local
    ``lowercase_`` while the bodies read ``config``, ``self.mobilenet_va``,
    ``self.classifier``, ``self.dropout``, ``return_dict``, ``labels``,
    ``logits``, ``loss``, ``loss_fct``, ``outputs`` and ``output``, which are
    never bound as written.
    """

    def __init__( self : List[str] , A : MobileNetVaConfig ) -> None:
        super().__init__(A )
        lowercase_ : int = config.num_labels
        lowercase_ : List[str] = MobileNetVaModel(A )
        lowercase_ : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        lowercase_ : Tuple = nn.Dropout(config.classifier_dropout_prob , inplace=A )
        lowercase_ : int = nn.Linear(A , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(A )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def A ( self : Optional[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        """Forward pass: backbone -> dropout -> linear head; computes a loss
        whose type follows config.problem_type (inferred from labels if unset)."""
        lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        lowercase_ : List[Any] = self.mobilenet_va(A , output_hidden_states=A , return_dict=A )
        lowercase_ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
        lowercase_ : Dict = self.classifier(self.dropout(A ) )
        lowercase_ : int = None
        if labels is not None:
            if self.config.problem_type is None:
                # Infer regression vs single- vs multi-label from num_labels/dtype.
                if self.num_labels == 1:
                    lowercase_ : List[str] = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    lowercase_ : Optional[Any] = '''single_label_classification'''
                else:
                    lowercase_ : Tuple = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                lowercase_ : str = MSELoss()
                if self.num_labels == 1:
                    lowercase_ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    lowercase_ : List[str] = loss_fct(A , A )
            elif self.config.problem_type == "single_label_classification":
                lowercase_ : List[Any] = CrossEntropyLoss()
                lowercase_ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                lowercase_ : str = BCEWithLogitsLoss()
                lowercase_ : List[Any] = loss_fct(A , A )
        if not return_dict:
            lowercase_ : Tuple = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=A , logits=A , hidden_states=outputs.hidden_states , )
| 33
| 0
|
def __SCREAMING_SNAKE_CASE ( input_a : int , input_b : int ) -> int:
    """XNOR gate: return 1 when both inputs are equal, else 0.

    The previous signature declared `SCREAMING_SNAKE_CASE` twice (a
    SyntaxError) and the body compared `input_a` with itself, which is
    always true; the second input is restored here.
    """
    return 1 if input_a == input_b else 0


# Readable alias matching the calls below (and the upstream name). The second
# definition in the previous revision reused the same dunder name, so the gate
# itself was shadowed by the test function.
xnor_gate = __SCREAMING_SNAKE_CASE


def test_xnor_gate() -> None:
    """Exhaustive truth-table check of the XNOR gate."""
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 325
|
"""simple docstring"""
def lowercase ( __snake_case : list[int] ) -> list[int]:
    """Sort `__snake_case` in place with exchange sort and return it (O(n^2)).

    The previous revision iterated `range()` over the list itself (TypeError),
    read an undefined name `numbers`, and bound the swap tuple to a throwaway
    local so nothing was ever exchanged — all fixed here.
    """
    length = len(__snake_case )
    for i in range(length ):
        for j in range(i + 1 , length ):
            if __snake_case[j] < __snake_case[i]:
                # Exchange the out-of-order pair.
                __snake_case[i], __snake_case[j] = __snake_case[j], __snake_case[i]
    return __snake_case
if __name__ == "__main__":
    # Read a comma-separated list, sort it with `lowercase` (upstream name:
    # exchange_sort), and print the result. The previous revision bound the
    # input to the throwaway name `__A` and then read the undefined names
    # `user_input`, `unsorted` and `exchange_sort`.
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(lowercase(unsorted))
| 33
| 0
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list ) -> list:
    """Sort `lowerCAmelCase__` in place with cocktail-shaker sort and return it.

    Alternates a right-to-left and a left-to-right bubble pass, stopping early
    when a full double pass makes no swap. The previous revision read the
    undefined names `unsorted` and `__snake_case`, bound its swap tuples to a
    throwaway local (so nothing was swapped), and annotated the return as
    `str` — all fixed here.
    """
    for i in range(len(lowerCAmelCase__ ) - 1 , 0 , -1 ):
        swapped = False
        # Backward pass: bubble the smallest remaining element leftwards.
        for j in range(i , 0 , -1 ):
            if lowerCAmelCase__[j] < lowerCAmelCase__[j - 1]:
                lowerCAmelCase__[j], lowerCAmelCase__[j - 1] = lowerCAmelCase__[j - 1], lowerCAmelCase__[j]
                swapped = True
        # Forward pass: bubble the largest remaining element rightwards.
        for j in range(i ):
            if lowerCAmelCase__[j] > lowerCAmelCase__[j + 1]:
                lowerCAmelCase__[j], lowerCAmelCase__[j + 1] = lowerCAmelCase__[j + 1], lowerCAmelCase__[j]
                swapped = True
        if not swapped:
            break
    return lowerCAmelCase__
if __name__ == "__main__":
    # The previous revision bound the input to the throwaway name
    # `__lowerCAmelCase` and then read the undefined names `user_input`,
    # `unsorted` and `cocktail_shaker_sort`; the sorter defined above is
    # `UpperCAmelCase__`.
    import doctest

    doctest.testmod()
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(f"""{UpperCAmelCase__(unsorted) = }""")
| 197
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
    """Holds YolosImageProcessor test settings and computes expected resize sizes.

    NOTE(review): mangled — ``__init__`` and the last method repeat the
    parameter name ``A`` (a SyntaxError as written) and bind the local
    ``lowercase_`` instead of instance attributes, while the methods read
    ``self.do_resize`` / ``self.size`` etc. and locals (``size``, ``parent``,
    ``image_inputs``, ``batched``, ``image``, ``w``/``h``,
    ``expected_values``, ``expected_height``/``expected_width``, ``item``)
    that are never bound as written.
    """

    def __init__( self : List[Any] , A : Any , A : Tuple=7 , A : Tuple=3 , A : Optional[Any]=30 , A : List[Any]=4_00 , A : Tuple=True , A : Dict=None , A : List[str]=True , A : Optional[int]=[0.5, 0.5, 0.5] , A : Tuple=[0.5, 0.5, 0.5] , A : List[str]=True , A : List[Any]=1 / 2_55 , A : Union[str, Any]=True , ) -> Tuple:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        lowercase_ : Optional[int] = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
        lowercase_ : Optional[int] = parent
        lowercase_ : str = batch_size
        lowercase_ : Tuple = num_channels
        lowercase_ : str = min_resolution
        lowercase_ : Any = max_resolution
        lowercase_ : str = do_resize
        lowercase_ : Any = size
        lowercase_ : Optional[int] = do_normalize
        lowercase_ : List[str] = image_mean
        lowercase_ : Optional[Any] = image_std
        lowercase_ : int = do_rescale
        lowercase_ : List[str] = rescale_factor
        lowercase_ : int = do_pad

    def A ( self : Any ) -> str:
        """Return the kwargs dict used to construct a YolosImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def A ( self : Optional[Any] , A : int , A : int=False ) -> Tuple:
        """Compute the (height, width) the processor should resize inputs to:
        scale the short side to size['shortest_edge']; for a batch, take the
        per-axis maxima over the individual expected sizes."""
        if not batched:
            lowercase_ : Optional[int] = image_inputs[0]
            if isinstance(A , Image.Image ):
                lowercase_ , lowercase_ : int = image.size
            else:
                lowercase_ , lowercase_ : Tuple = image.shape[1], image.shape[2]
            if w < h:
                lowercase_ : int = int(self.size['''shortest_edge'''] * h / w )
                lowercase_ : Optional[Any] = self.size['''shortest_edge''']
            elif w > h:
                lowercase_ : Optional[Any] = self.size['''shortest_edge''']
                lowercase_ : Optional[int] = int(self.size['''shortest_edge'''] * w / h )
            else:
                lowercase_ : Any = self.size['''shortest_edge''']
                lowercase_ : Any = self.size['''shortest_edge''']
        else:
            lowercase_ : Tuple = []
            for image in image_inputs:
                lowercase_ , lowercase_ : Optional[Any] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            lowercase_ : Union[str, Any] = max(A , key=lambda A : item[0] )[0]
            lowercase_ : Optional[Any] = max(A , key=lambda A : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _UpperCAmelCase ( _A , unittest.TestCase ):
    """Test suite for ``YolosImageProcessor``: property defaults, PIL / numpy /
    torch inputs, pad-vs-call equivalence, and slow COCO integration tests.

    NOTE(review): identifiers look machine-mangled — every method is named
    ``A`` (later defs shadow earlier ones, so ``unittest`` would only see the
    last), and locals are dropped into ``lowercase_`` while later lines read
    the original names (``image_processor``, ``encoded_images``...).  Restore
    the original names before relying on this suite.
    """

    SCREAMING_SNAKE_CASE_ : Optional[Any] = YolosImageProcessor if is_vision_available() else None

    def A ( self : Optional[int] ) -> Optional[int]:
        # setUp: build the settings helper defined above.
        lowercase_ : Optional[Any] = YolosImageProcessingTester(self )

    @property
    def A ( self : str ) -> Any:
        # Kwargs for constructing the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def A ( self : Optional[int] ) -> List[str]:
        # The processor must expose all of its configuration attributes.
        lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A , '''image_mean''' ) )
        self.assertTrue(hasattr(A , '''image_std''' ) )
        self.assertTrue(hasattr(A , '''do_normalize''' ) )
        self.assertTrue(hasattr(A , '''do_resize''' ) )
        self.assertTrue(hasattr(A , '''size''' ) )

    def A ( self : Dict ) -> Tuple:
        # from_dict must honour explicit size / pad overrides.
        lowercase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
        self.assertEqual(image_processor.do_pad , A )
        lowercase_ : Tuple = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , A )

    def A ( self : Optional[int] ) -> Tuple:
        # Intentionally empty placeholder (present in the upstream template).
        pass

    def A ( self : Tuple ) -> int:
        """PIL inputs: single image and batch resize to the expected shape."""
        # Initialize image_processing
        lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
        for image in image_inputs:
            self.assertIsInstance(A , Image.Image )
        # Test not batched input
        lowercase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
        lowercase_ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def A ( self : str ) -> Any:
        """numpy inputs: same shape checks as the PIL variant."""
        # Initialize image_processing
        lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
        for image in image_inputs:
            self.assertIsInstance(A , np.ndarray )
        # Test not batched input
        lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        lowercase_ , lowercase_ : int = self.image_processor_tester.get_expected_values(A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase_ : Optional[int] = image_processing(A , return_tensors='''pt''' ).pixel_values
        lowercase_ , lowercase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def A ( self : Tuple ) -> Optional[int]:
        """torch inputs: same shape checks as the PIL variant."""
        # Initialize image_processing
        lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A , torch.Tensor )
        # Test not batched input
        lowercase_ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase_ : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
        lowercase_ , lowercase_ : List[str] = self.image_processor_tester.get_expected_values(A , batched=A )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def A ( self : Tuple ) -> Optional[Any]:
        """Calling the processor and calling ``pad`` directly must agree."""
        # Initialize image_processings
        lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        lowercase_ : Tuple = self.image_processing_class(do_resize=A , do_normalize=A , do_rescale=A )
        # create random PyTorch tensors
        lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A , torch.Tensor )
        # Test whether the method "pad" and calling the image processor return the same tensors
        lowercase_ : Union[str, Any] = image_processing_a.pad(A , return_tensors='''pt''' )
        lowercase_ : List[Any] = image_processing_a(A , return_tensors='''pt''' )
        self.assertTrue(
            torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )

    @slow
    def A ( self : str ) -> List[Any]:
        """Slow integration test: COCO detection annotations round-trip."""
        # prepare image and target
        lowercase_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            lowercase_ : List[Any] = json.loads(f.read() )
        lowercase_ : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
        # encode them
        lowercase_ : Union[str, Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
        lowercase_ : List[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
        # verify pixel values
        lowercase_ : Union[str, Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , A )
        lowercase_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
        # verify area
        lowercase_ : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
        # verify boxes
        lowercase_ : List[str] = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
        lowercase_ : Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
        # verify image_id
        lowercase_ : List[Any] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
        # verify is_crowd
        lowercase_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
        # verify class_labels
        lowercase_ : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
        # verify orig_size
        lowercase_ : List[str] = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
        # verify size
        lowercase_ : Optional[Any] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )

    @slow
    def A ( self : List[Any] ) -> Dict:
        """Slow integration test: COCO panoptic annotations (with masks) round-trip."""
        # prepare image, target and masks_path
        lowercase_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            lowercase_ : str = json.loads(f.read() )
        lowercase_ : int = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
        lowercase_ : List[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        lowercase_ : int = YolosImageProcessor(format='''coco_panoptic''' )
        lowercase_ : Any = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
        # verify pixel values
        lowercase_ : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , A )
        lowercase_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
        # verify area
        lowercase_ : List[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
        # verify boxes
        lowercase_ : str = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
        lowercase_ : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
        # verify image_id
        lowercase_ : List[str] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
        # verify is_crowd
        lowercase_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
        # verify class_labels
        lowercase_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
        # verify masks
        lowercase_ : Dict = 82_28_73
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
        # verify orig_size
        lowercase_ : Tuple = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
        # verify size
        lowercase_ : List[str] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 33
| 0
|
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
# Default HTTP headers with a randomized User-Agent (helps avoid trivial bot blocking).
lowerCAmelCase__ = {'''UserAgent''': UserAgent().random}
def _A ( A__ ):
    """Extract the Instagram profile payload from a BeautifulSoup ``<script>`` tag.

    The tag text is expected to look like ``window._sharedData = {...};`` —
    the JSON object is sliced out (from the ``{"config"`` marker up to the
    trailing ``;``) and the user dict is returned.

    Fix vs. the original: the body read the undefined names ``script`` and
    ``data`` instead of the parameter / the extracted string, raising
    NameError on every call.

    :param A__: a BeautifulSoup ``Tag`` whose first content item is the script text.
    :returns: the ``user`` dict from the profile-page GraphQL payload.
    :raises json.JSONDecodeError: if the sliced text is not valid JSON.
    :raises KeyError: if the payload lacks the expected keys.
    """
    data = A__.contents[0]
    info = json.loads(data[data.find('''{"config"''') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase_ :
    """Scraper that exposes the public fields of an Instagram profile.

    NOTE(review): this class appears machine-mangled.  ``__init__`` drops both
    computed values into a local ``__lowercase`` instead of ``self.url`` /
    ``self.user_data`` (and its f-string reads an undefined ``username``), yet
    the methods below read those instance attributes.  Every property is also
    named ``SCREAMING_SNAKE_CASE``, so each definition shadows the previous
    one — only the last survives on the class.  Restore the original names
    (username, fullname, biography, ...) before use.
    """

    def __init__( self : List[Any] ,lowercase__ : Any ):
        # NOTE(review): should presumably be self.username / self.url /
        # self.user_data assignments — TODO confirm against upstream.
        __lowercase = F"https://www.instagram.com/{username}/"
        __lowercase = self.get_json()

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        # Fetch the profile page and parse the user payload out of a <script>
        # tag; falls back to the adjacent tag when the expected one fails.
        __lowercase = requests.get(self.url ,headers=lowercase__ ).text
        __lowercase = BeautifulSoup(lowercase__ ,'''html.parser''' ).find_all('''script''' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )

    def __repr__( self : int ):
        return F"{self.__class__.__name__}(\'{self.username}\')"

    def __str__( self : Dict ):
        return F"{self.fullname} ({self.username}) is {self.biography}"

    # The properties below each expose one field of the scraped payload.
    @property
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        return self.user_data["username"]

    @property
    def SCREAMING_SNAKE_CASE ( self : Any ):
        return self.user_data["full_name"]

    @property
    def SCREAMING_SNAKE_CASE ( self : int ):
        return self.user_data["biography"]

    @property
    def SCREAMING_SNAKE_CASE ( self : int ):
        return self.user_data["business_email"]

    @property
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        return self.user_data["external_url"]

    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        return self.user_data["edge_follow"]["count"]

    @property
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        return self.user_data["profile_pic_url_hd"]

    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        return self.user_data["is_verified"]

    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        return self.user_data["is_private"]
def _A ( A__ = "github" ):
    """Live smoke test of InstagramUser against the public ``github`` profile.

    Skipped on CI (the request is blocked there).  NOTE(review): mangled —
    the body reads ``__snake_case`` (should be the parameter), ``username``,
    and ``instagram_user`` (the constructed instance is dropped into a local
    ``__lowercase``), and this definition shadows the ``_A`` extractor above.
    """
    import os

    if os.environ.get('''CI''' ):
        return  # test failing on GitHub Actions
    __lowercase = InstagramUser(__snake_case )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , __snake_case )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the instance is bound to ``lowerCAmelCase__`` but every
    # print below reads ``instagram_user`` — mangled; unify the name.
    lowerCAmelCase__ = InstagramUser('''github''')
    print(instagram_user)
    print(f'{instagram_user.number_of_posts = }')
    print(f'{instagram_user.number_of_followers = }')
    print(f'{instagram_user.number_of_followings = }')
    print(f'{instagram_user.email = }')
    print(f'{instagram_user.website = }')
    print(f'{instagram_user.profile_picture_url = }')
    print(f'{instagram_user.is_verified = }')
    print(f'{instagram_user.is_private = }')
| 104
|
"""simple docstring"""
def lowercase ( __snake_case : int = 1_0_0 ):
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first ``__snake_case`` natural numbers.

    Fix vs. the original: the loop iterated over an undefined name ``n``
    instead of the parameter, raising NameError on every call.

    >>> lowercase(10)
    2640
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, __snake_case + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
    # Fix: the original printed ``solution()``, a name that does not exist in
    # this file — the solver above is (mangled to) ``lowercase``.
    print(f"""{lowercase() = }""")
| 33
| 0
|
from __future__ import annotations
def __magic_name__ ( sequence : list , start : int | None = None , end : int | None = None ) -> None:
    """Sort ``sequence`` in place between ``start`` and ``end`` (inclusive)
    using slowsort (a deliberately inefficient "multiply and surrender"
    algorithm; kept for educational purposes).

    Fixes vs. the original:
    * all three parameters were named ``A`` (duplicate argument names are a
      SyntaxError) — restored to the names the body actually uses;
    * the recursive calls referenced an undefined ``slowsort`` — they now
      recurse into this function;
    * the swap assigned a tuple to a throwaway local instead of exchanging
      ``sequence[mid]`` and ``sequence[end]``.

    :param sequence: list to sort in place.
    :param start: first index of the range (defaults to 0).
    :param end: last index of the range (defaults to ``len(sequence) - 1``).
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    __magic_name__(sequence, start, mid)
    __magic_name__(sequence, mid + 1, end)
    # After sorting both halves, make sure the overall maximum sits at ``end``
    # before recursing on the shortened range.
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    __magic_name__(sequence, start, end - 1)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 107
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    """Convert a txt2img unCLIP (Karlo) checkpoint into an image-variation pipeline.

    Fixes vs. the original: the parser was bound to ``__A`` while the
    ``add_argument`` calls read an undefined ``parser``; the txt2img pipeline
    was read via ``args.txtaimg_unclip`` although argparse stores the
    ``--txt2img_unclip`` option as ``txt2img_unclip``; and the final save was
    called on an undefined ``imgaimg``.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )

    args = parser.parse_args()

    # Reuse every sub-module of the text-to-image pipeline except the prior,
    # and pair it with a CLIP image encoder for image variations.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 33
| 0
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
# Module-level logger for this tokenizer file.
__lowerCamelCase = logging.get_logger(__name__)

# NOTE(review): the three constants below are all bound to the same mangled
# name ``__lowerCamelCase`` — each assignment shadows the previous one.  The
# originals were presumably VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, the names the tokenizer class reads.
__lowerCamelCase = {'''vocab_file''': '''vocab.txt'''}

__lowerCamelCase = {
    '''vocab_file''': {
        '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
        '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
    },
}

__lowerCamelCase = {
    '''facebook/esm2_t6_8M_UR50D''': 1024,
    '''facebook/esm2_t12_35M_UR50D''': 1024,
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list:
    """Load an ESM vocabulary file and return its tokens as a list of
    whitespace-stripped lines.

    Fix vs. the original: the body opened an undefined name ``__snake_case``
    instead of the path parameter, raising NameError on every call.  The
    return annotation is also corrected from ``Tuple`` to ``list``.

    :param UpperCAmelCase__: path to a vocab text file, one token per line.
    :returns: list of stripped token strings (empty lines become '''').
    """
    with open(UpperCAmelCase__, """r""") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class A__ ( _A ):
    """Tokenizer for ESM protein language models (whitespace-split amino-acid
    tokens loaded from a flat vocab file).

    NOTE(review): identifiers appear machine-mangled — most methods are named
    ``snake_case_`` (later definitions shadow earlier ones), and two
    signatures below repeat the parameter name ``UpperCamelCase__``, which is
    a SyntaxError.  Restore the original method/parameter names before use.
    """

    lowercase = VOCAB_FILES_NAMES
    lowercase = PRETRAINED_VOCAB_FILES_MAP
    lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase = ["input_ids", "attention_mask"]
    # NOTE(review): the four class attributes above are all bound to the same
    # name ``lowercase`` — only the last assignment survives.

    def __init__( self , UpperCamelCase__ , UpperCamelCase__="<unk>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__="<eos>" , **UpperCamelCase__ , ) -> Union[str, Any]:
        """Build id<->token tables from the vocab file and register the
        special tokens.  NOTE(review): duplicate parameter names (SyntaxError).
        """
        super().__init__(**UpperCamelCase__ )
        A_ = load_vocab_file(UpperCamelCase__ )
        A_ = dict(enumerate(self.all_tokens ) )
        A_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        A_ = unk_token
        A_ = cls_token
        A_ = pad_token
        A_ = mask_token
        A_ = eos_token
        A_ = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def snake_case_ ( self , UpperCamelCase__ ) -> str:
        """Map a token id back to its string (unk for unknown ids)."""
        return self._id_to_token.get(UpperCamelCase__ , self.unk_token )

    def snake_case_ ( self , UpperCamelCase__ ) -> int:
        """Map a token string to its id (unk id for unknown tokens)."""
        return self._token_to_id.get(UpperCamelCase__ , self._token_to_id.get(self.unk_token ) )

    def snake_case_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
        """Tokenize by simple whitespace splitting (one token per residue group)."""
        return text.split()

    def snake_case_ ( self , UpperCamelCase__=False ) -> List[Any]:
        """Return the vocabulary size."""
        return len(self._id_to_token )

    def snake_case_ ( self ) -> Any:
        """Return the full token -> id mapping."""
        return {token: i for i, token in enumerate(self.all_tokens )}

    def snake_case_ ( self , UpperCamelCase__ ) -> int:
        """Token -> id (duplicate of the converter above; shadowed name)."""
        return self._token_to_id.get(UpperCamelCase__ , self._token_to_id.get(self.unk_token ) )

    def snake_case_ ( self , UpperCamelCase__ ) -> str:
        """Id -> token (duplicate of the converter above; shadowed name)."""
        return self._id_to_token.get(UpperCamelCase__ , self.unk_token )

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
        """Add ``<cls> ... <eos>`` (and a second segment when given) around ids.
        NOTE(review): duplicate parameter names (SyntaxError).
        """
        A_ = [self.cls_token_id]
        A_ = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
        return cls + token_ids_a + sep + token_ids_a + sep  # Multiple inputs always have an EOS token

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
        """Return a 0/1 mask flagging special-token positions.
        NOTE(review): duplicate parameter names (SyntaxError).
        """
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        A_ = [1] + ([0] * len(UpperCamelCase__ )) + [1]
        if token_ids_a is not None:
            mask += [0] * len(UpperCamelCase__ ) + [1]
        return mask

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
        """Write the vocabulary back out, one token per line.
        NOTE(review): duplicate parameter names (SyntaxError).
        """
        A_ = os.path.join(UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
        with open(UpperCamelCase__ , """w""" ) as f:
            f.write("""\n""".join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def snake_case_ ( self ) -> int:
        """Vocabulary size including added tokens."""
        return self.get_vocab_size(with_added_tokens=UpperCamelCase__ )

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> int:
        """Delegate token addition to the base class.
        NOTE(review): duplicate parameter names (SyntaxError).
        """
        return super()._add_tokens(UpperCamelCase__ , special_tokens=UpperCamelCase__ )
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make diffusers/torch ops deterministic so image outputs are reproducible.
enable_full_determinism()
class _UpperCAmelCase ( _A , unittest.TestCase ):
    """Fast pipeline tests for the Kandinsky 2.2 controlnet img2img pipeline
    using tiny dummy UNet/VQ models.

    NOTE(review): identifiers appear machine-mangled — all methods are named
    ``A`` (later defs shadow earlier ones), one signature repeats the
    parameter name ``A`` (a SyntaxError), and the class attributes are all
    bound to ``SCREAMING_SNAKE_CASE_`` so only the last one survives.
    """

    SCREAMING_SNAKE_CASE_ : Any = KandinskyVaaControlnetImgaImgPipeline
    SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_embeds", "negative_image_embeds", "image", "hint"]
    SCREAMING_SNAKE_CASE_ : str = ["image_embeds", "negative_image_embeds", "image", "hint"]
    SCREAMING_SNAKE_CASE_ : Dict = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    SCREAMING_SNAKE_CASE_ : Dict = False

    # Dummy model dimensions (property names mangled; originals were
    # text_embedder_hidden_size / time_input_dim / block_out_channels_a /
    # cross_attention_dim / num_inference_steps — TODO confirm upstream).
    @property
    def A ( self : Any ) -> Any:
        return 32

    @property
    def A ( self : Optional[int] ) -> Any:
        return 32

    @property
    def A ( self : Dict ) -> int:
        return self.time_input_dim

    @property
    def A ( self : Tuple ) -> str:
        return self.time_input_dim * 4

    @property
    def A ( self : Any ) -> str:
        return 1_00

    @property
    def A ( self : str ) -> List[str]:
        """Build a tiny seeded UNet with image+hint conditioning."""
        torch.manual_seed(0 )
        lowercase_ : List[Any] = {
            '''in_channels''': 8,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image_hint''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        lowercase_ : Dict = UNetaDConditionModel(**A )
        return model

    @property
    def A ( self : Optional[Any] ) -> Union[str, Any]:
        """Constructor kwargs for the tiny VQ (movq) decoder."""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def A ( self : List[Any] ) -> Dict:
        """Build the seeded dummy VQ model."""
        torch.manual_seed(0 )
        lowercase_ : int = VQModel(**self.dummy_movq_kwargs )
        return model

    def A ( self : Union[str, Any] ) -> Optional[int]:
        """Assemble pipeline components: dummy UNet, DDIM scheduler, dummy movq."""
        lowercase_ : Tuple = self.dummy_unet
        lowercase_ : int = self.dummy_movq
        lowercase_ : List[Any] = {
            '''num_train_timesteps''': 10_00,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        lowercase_ : str = DDIMScheduler(**A )
        lowercase_ : Tuple = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def A ( self : Optional[int] , A : int , A : List[str]=0 ) -> int:
        """Build seeded dummy call kwargs (embeds, init image, hint, generator).
        NOTE(review): duplicate parameter names ``A`` (SyntaxError).
        """
        lowercase_ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A ) ).to(A )
        lowercase_ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            A )
        # create init_image
        lowercase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A ) ).to(A )
        lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ : Optional[Any] = Image.fromarray(np.uinta(A ) ).convert('''RGB''' ).resize((2_56, 2_56) )
        # create hint
        lowercase_ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A ) ).to(A )
        if str(A ).startswith('''mps''' ):
            lowercase_ : Optional[Any] = torch.manual_seed(A )
        else:
            lowercase_ : List[Any] = torch.Generator(device=A ).manual_seed(A )
        lowercase_ : Dict = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs

    def A ( self : Any ) -> List[Any]:
        """End-to-end dummy run on CPU: output shape and pixel-slice regression."""
        lowercase_ : List[str] = '''cpu'''
        lowercase_ : Any = self.get_dummy_components()
        lowercase_ : Any = self.pipeline_class(**A )
        lowercase_ : int = pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        lowercase_ : Dict = pipe(**self.get_dummy_inputs(A ) )
        lowercase_ : str = output.images
        lowercase_ : int = pipe(
            **self.get_dummy_inputs(A ) , return_dict=A , )[0]
        lowercase_ : Dict = image[0, -3:, -3:, -1]
        lowercase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowercase_ : List[str] = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test for the full Kandinsky 2.2 controlnet
    img2img pipeline against a reference output.

    NOTE(review): method names are mangled to ``A`` (the second definition
    shadows the first) and locals are dropped into ``lowercase_``; restore
    the original names (tearDown / test method) before running.
    """

    def A ( self : Tuple ) -> str:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A ( self : Any ) -> Optional[int]:
        """Run prior + controlnet img2img on the reference cat image and
        compare mean pixel difference against a stored numpy output."""
        lowercase_ : Dict = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
        lowercase_ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        lowercase_ : Optional[int] = init_image.resize((5_12, 5_12) )
        lowercase_ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''' )
        # Convert the depth hint to a (1, 3, H, W) float tensor in [0, 1].
        lowercase_ : Optional[int] = torch.from_numpy(np.array(A ) ).float() / 255.0
        lowercase_ : Tuple = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        lowercase_ : Optional[Any] = '''A robot, 4k photo'''
        lowercase_ : Tuple = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
        pipe_prior.to(A )
        lowercase_ : Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
        lowercase_ : int = pipeline.to(A )
        pipeline.set_progress_bar_config(disable=A )
        lowercase_ : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowercase_ , lowercase_ : int = pipe_prior(
            A , image=A , strength=0.85 , generator=A , negative_prompt='''''' , ).to_tuple()
        lowercase_ : str = pipeline(
            image=A , image_embeds=A , negative_image_embeds=A , hint=A , generator=A , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type='''np''' , )
        lowercase_ : Optional[Any] = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(A , A )
| 33
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class __magic_name__ :
    """A binary-tree node.

    Fix vs. the original: the constructor dropped its argument into a
    throwaway local (``lowerCamelCase = value``) and never set any instance
    attributes, although the traversal class in this file reads
    ``node.value`` / ``node.left`` / ``node.right``.
    """

    def __init__( self , _a ):
        """Store the node's payload and start with no children.

        :param _a: the value held by this node.
        """
        self.value = _a
        self.left = None
        self.right = None
class __magic_name__ :
    """Iterable wrapper over a binary tree that yields one value: the sum of
    all node values (depth-first).

    Fixes vs. the original: ``__init__`` dropped the root into a local
    (``lowerCamelCase = tree``, reading an undefined name) instead of setting
    ``self.tree``, and the recursive summation called an undefined
    ``self.depth_first_search`` although the method is named
    ``_lowerCAmelCase`` here.
    """

    def __init__( self , _a ):
        """Keep a reference to the root node (may be None for an empty tree)."""
        self.tree = _a

    def _lowerCAmelCase ( self , _a ):
        """Return the sum of values in the subtree rooted at ``_a`` (0 for None)."""
        if _a is None:
            return 0
        return _a.value + (
            self._lowerCAmelCase(_a.left ) + self._lowerCAmelCase(_a.right )
        )

    def __iter__( self ):
        """Yield a single item: the total of the whole tree."""
        yield self._lowerCAmelCase(self.tree )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 291
|
"""simple docstring"""
def lowercase ( __snake_case : int = 1_0_0_0 ) -> int:
    """Project Euler 25: index of the first Fibonacci term (F1 = F2 = 1) to
    contain ``__snake_case`` digits.

    Fixes vs. the original: the unpacking bound both Fibonacci accumulators
    to the same mangled name, the digit count was taken over the undefined
    parameter alias, and the loop condition compared against an undefined
    ``n``.  Rewritten with the standard two-accumulator recurrence; behavior
    for the default input matches the known answer (4782).

    >>> lowercase(2)
    7
    """
    fib_prev, fib_curr = 1, 1
    index = 2  # fib_curr currently holds F2
    while len(str(fib_curr)) < __snake_case:
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        index += 1
    return index
if __name__ == "__main__":
    # Fix: the original called an undefined ``solution``; the solver above is
    # (mangled to) ``lowercase``.
    print(lowercase(int(str(input()).strip())))
| 33
| 0
|
def a( input_a : int , input_b : int ) -> int:
    """NAND gate: return 0 only when both inputs are 1, otherwise 1.

    Fix vs. the original: both parameters were named ``A`` (duplicate
    argument names are a SyntaxError) and both tuple slots read the same
    mangled name, so the second input was ignored.

    :param input_a: first bit (0 or 1).
    :param input_b: second bit (0 or 1).
    :returns: 1 if at least one input is 0, else 0.
    """
    return int((input_a, input_b).count(0 ) != 0 )
def a( ) -> str:
    """Self-test of the NAND truth table.

    NOTE(review): mangled — this definition shadows the gate function ``a``
    above, and the assertions reference an undefined ``nand_gate`` (the
    original names were presumably ``test_nand_gate`` / ``nand_gate``).
    """
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
    # NOTE(review): ``nand_gate`` is not defined in this file (the gate was
    # mangled to ``a``, itself shadowed by the self-test above) — this demo
    # raises NameError as written.
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 227
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
__A : Dict = logging.get_logger(__name__)

# Map of canonical checkpoint -> hosted config URL (originally the
# pretrained-config archive map; both constants share a mangled name).
__A : Union[str, Any] = {
    '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _UpperCAmelCase ( _A ):
    """Configuration class for ViT-MAE models (encoder sizes, patching, and
    the MAE decoder/mask settings).

    NOTE(review): identifiers appear machine-mangled — every ``__init__``
    parameter is named ``A`` (duplicate argument names are a SyntaxError)
    and each value is dropped into a local ``lowercase_`` instead of a
    ``self.<attr>`` assignment; the names on the right-hand side
    (hidden_size, num_hidden_layers, ...) are the intended attributes.
    """

    SCREAMING_SNAKE_CASE_ : Union[str, Any] = "vit_mae"  # model_type identifier

    def __init__( self : Dict , A : List[str]=7_68 , A : Any=12 , A : Union[str, Any]=12 , A : Tuple=30_72 , A : Any="gelu" , A : Tuple=0.0 , A : List[str]=0.0 , A : Tuple=0.02 , A : Tuple=1e-12 , A : int=2_24 , A : Dict=16 , A : int=3 , A : Tuple=True , A : Tuple=16 , A : Optional[Any]=5_12 , A : Union[str, Any]=8 , A : List[Any]=20_48 , A : Dict=0.75 , A : Any=False , **A : Optional[int] , ) -> Union[str, Any]:
        super().__init__(**A )
        # Encoder hyper-parameters.
        lowercase_ : List[Any] = hidden_size
        lowercase_ : str = num_hidden_layers
        lowercase_ : List[Any] = num_attention_heads
        lowercase_ : Any = intermediate_size
        lowercase_ : Optional[int] = hidden_act
        lowercase_ : List[Any] = hidden_dropout_prob
        lowercase_ : int = attention_probs_dropout_prob
        lowercase_ : int = initializer_range
        lowercase_ : Dict = layer_norm_eps
        # Image / patch geometry.
        lowercase_ : Optional[Any] = image_size
        lowercase_ : str = patch_size
        lowercase_ : Dict = num_channels
        lowercase_ : Any = qkv_bias
        # MAE decoder and masking settings.
        lowercase_ : Union[str, Any] = decoder_num_attention_heads
        lowercase_ : Optional[Any] = decoder_hidden_size
        lowercase_ : List[str] = decoder_num_hidden_layers
        lowercase_ : List[Any] = decoder_intermediate_size
        lowercase_ : Optional[Any] = mask_ratio
        lowercase_ : Optional[Any] = norm_pix_loss
| 33
| 0
|
"""simple docstring"""
def A_ ( n : int , k : int ):
    """Return the ``k``-th (0-indexed) lexicographic permutation of
    ``range(n)`` using the factorial number system.

    Fixes vs. the original: both parameters were named ``_lowerCAmelCase``
    (duplicate argument names are a SyntaxError) and the body read an
    undefined ``__snake_case`` in place of them.

    :param n: number of elements to permute.
    :param k: permutation rank, ``0 <= k < n!``.
    :returns: list of ints — the k-th permutation.
    :raises AssertionError: if ``k`` is out of bounds.
    """
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: peel one factorial digit per position, consuming the
    # chosen element from the remaining pool.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 320
|
"""simple docstring"""
def lowercase ( __snake_case : int ):
if n == 1 or not isinstance(__snake_case , __snake_case ):
return 0
elif n == 2:
return 1
else:
lowercase_ : Dict = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def lowercase ( __snake_case : int ):
lowercase_ : str = 0
lowercase_ : List[str] = 2
while digits < n:
index += 1
lowercase_ : Any = len(str(fibonacci(__snake_case ) ) )
return index
def lowercase ( __snake_case : int = 1_0_0_0 ):
return fibonacci_digits_index(__snake_case )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 33
| 0
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# Module-level helpers/constants. The mangled original bound everything to
# the single name `UpperCAmelCase`, so the names referenced throughout the
# rest of the file (`logger`, `_CONFIG_FOR_DOC`, ...) were undefined.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF checkpoint variable prefixes to PyTorch parameters.

    The mangled original assigned every map entry to a throwaway local, so
    the mapping was lost and ``tf_to_pt_map`` was undefined at return.
    Renamed to `_build_tf_to_pytorch_map` to match the caller in
    ``load_tf_weights_in_mobilenet_va``.
    """
    tf_to_pt_map = {}

    # A classification model wraps the backbone under `.mobilenet_va`;
    # a bare backbone model is used directly.
    backbone = model.mobilenet_va if hasattr(model, "mobilenet_va") else model

    # Stem convolution + batch norm.
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    # Each TF block i maps to a (depthwise, pointwise) pair of PyTorch layers.
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    # Classification head, when present.
    if hasattr(model, "classifier"):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load a TensorFlow MobileNetV1 checkpoint into the PyTorch ``model``.

    Renamed from the mangled ``__UpperCamelCase`` to match the reference in
    the pretrained-model base class; local names restored likewise.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.' )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f'Loading TF weight {name} with shape {shape}')
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f'Importing {name}')
        if name not in tf_weights:
            logger.info(f'{name} not in tf pre-trained weights, skipping')
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info('Transposing depthwise')
            # TF depthwise layout (H, W, C, 1) -> PyTorch (C, 1, H, W).
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('Transposing')
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched')

        logger.info(f'Initialize PyTorch weight {name} {array.shape}')
        pointer.data = torch.from_numpy(array)

        # Drop the weight and its optimizer slots so the final report only
        # lists variables that were genuinely not copied.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/RMSProp', None)
        tf_weights.pop(name + '/RMSProp_1', None)
        tf_weights.pop(name + '/ExponentialMovingAverage', None)

    logger.info(f'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}')
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Pad ``features`` the way TensorFlow's SAME padding would for ``conv_layer``.

    TF distributes padding asymmetrically (extra pixel on the bottom/right)
    when the required total padding is odd, unlike PyTorch's symmetric
    padding, so it must be applied explicitly before the convolution.
    Renamed from the mangled ``__UpperCamelCase`` to match the call in the
    conv layer's forward pass.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    # F.pad takes (left, right, top, bottom) for the last two dimensions.
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, 'constant', 0.0)
class MobileNetVaConvLayer(nn.Module):
    """Convolution + optional batch norm + optional activation building block.

    Renamed from the shadowed ``lowerCAmelCase`` to match the references in
    the backbone's ``__init__``; the mangled original also never stored its
    sub-modules on ``self`` and used the non-existent ``nn.Convad`` /
    ``nn.BatchNormad`` names (restored to ``nn.Conv2d`` / ``nn.BatchNorm2d``).
    """

    def __init__(
        self,
        config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation=True,
    ):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.')
        if out_channels % groups != 0:
            raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.')

        # TF-style padding is applied dynamically in forward(); otherwise use
        # PyTorch's static symmetric padding.
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode='zeros',
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            # `use_activation` may be True (fall back to config.hidden_act)
            # or the name of an activation function.
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """Base class handling weight init and pretrained-checkpoint loading.

    The mangled original inherited from the undefined ``_A`` (restored to the
    ``PreTrainedModel`` imported at the top of this file) and bound all five
    class attributes to the same shadowed name ``lowerCAmelCase_``.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights of a single sub-module."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Docstring constants consumed by the `add_start_docstrings*` decorators
# below. The mangled original bound both to `UpperCAmelCase`, losing them.
MOBILENET_V1_START_DOCSTRING = R'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

MOBILENET_V1_INPUTS_DOCSTRING = R'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone: a stem conv followed by 13 depthwise/pointwise pairs.

    Restored from the mangled original, which never stored its sub-modules
    on ``self`` and shadowed the class name ``lowerCAmelCase``.
    """

    def __init__(self, config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        # Per-block strides of the 13 depthwise stages (from the paper).
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            # Channel count doubles at every strided stage (and the first).
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise conv: groups == channels.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                ))
            # Pointwise (1x1) conv mixes channels.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                ))

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        """Head pruning is not applicable to a convolutional model."""
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone plus a dropout + linear classification head.

    Restored from the mangled original, which never stored its sub-modules
    on ``self`` and shadowed the class name ``lowerCAmelCase``.
    """

    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type from the label dtype/count when the
            # config does not pin it down explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| 141
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A : List[str] = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''MobileNetV2FeatureExtractor''']
__A : Optional[int] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle with ``num_rows`` rows, centred.

    Renamed from the shadowed ``_lowercase``; local names restored so that
    ``triangle`` and ``num_rows`` are actually defined.
    """
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Return Pascal's triangle as a list of rows.

    Raises:
        TypeError: if ``num_rows`` is not an int.
        ValueError: if ``num_rows`` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of \'num_rows\' should be \'int\'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of \'num_rows\' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = _populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def _populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build row ``current_row_idx`` from the rows already in ``triangle``."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        _calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def _calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Set one interior cell as the sum of the two cells above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Return Pascal's triangle, computing only half of each row.

    Each row is symmetric, so only the distinct first half is summed; the
    second half is a reversed copy of the first.

    Raises:
        TypeError: if ``num_rows`` is not an int.
        ValueError: if ``num_rows`` is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of \'num_rows\' should be \'int\'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of \'num_rows\' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so adjacent sums give the new row.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
def benchmark() -> None:
    """Time both triangle generators for sizes 0..14 and print the results.

    Renamed from the shadowed ``_lowercase`` to match the ``benchmark()``
    call in the ``__main__`` guard; lost local names restored.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Time `func(value)` by importing it back through __main__.
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F"""{call:38} -- {timing:.4f} seconds""")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    # Verify the doctests first, then time both implementations.
    import doctest

    doctest.testmod()
    benchmark()
| 269
|
"""simple docstring"""
from __future__ import annotations
__A : List[Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__A : str = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Return, for each element, the next greater element to its right.

    Brute force, O(n^2): scan rightwards from each position; -1 marks
    "no greater element".
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like `next_greatest_element_slow`, but using enumerate and slices.

    Still O(n^2) in the worst case; -1 marks "no greater element".
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """Return the next greater element for each position using a stack, O(n).

    Walk right-to-left keeping a stack of candidates; the stack top (after
    popping values <= the current one) is the answer for the current index.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # Values <= the current element can never be the "next greater"
            # for anything further left, so discard them.
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
    # Run doctests, show all three results, then time each implementation.
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    # Shared timeit setup string. The mangled original bound this to `__A`
    # while the timeit calls below referenced the lost name `setup`.
    setup = (
        '''from __main__ import arr, next_greatest_element_slow, '''
        '''next_greatest_element_fast, next_greatest_element'''
    )
    print(
        '''next_greatest_element_slow():''',
        timeit('''next_greatest_element_slow(arr)''', setup=setup),
    )
    print(
        '''next_greatest_element_fast():''',
        timeit('''next_greatest_element_fast(arr)''', setup=setup),
    )
    print(
        ''' next_greatest_element():''',
        timeit('''next_greatest_element(arr)''', setup=setup),
    )
| 33
| 0
|
def count_inversions_bf(arr: list) -> int:
    """Count inversions (pairs i < j with arr[i] > arr[j]) by brute force, O(n^2).

    Renamed from the mangled ``A_`` to match the calls in ``main``; local
    names restored so that ``num_inversions`` and ``n`` are defined.
    """
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions
def count_inversions_recursive(arr: list):
    """Count inversions via merge sort, O(n log n).

    Returns:
        (sorted_arr, num_inversions) — the sorted copy is needed by the
        recursion to count cross-half inversions in linear time.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_arr, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return sorted_arr, num_inversions


def _count_cross_inversions(p: list, q: list):
    """Merge two sorted halves, counting inversions that cross between them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion
def main() -> None:
    """Sanity-check both inversion counters against known inputs."""
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8

    print('number of inversions = ', num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)


if __name__ == "__main__":
    main()
| 99
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
from ...utils import is_torch_available, is_transformers_available

# The VQ-Diffusion pipeline needs both `transformers` and `torch`; expose its
# public names only when those optional dependencies are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 325
|
"""simple docstring"""
def lowercase ( __snake_case : int ):
if not isinstance(__snake_case , __snake_case ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny ResNet configs/inputs and checks TF model outputs.

    Renamed from the mangled ``_A``; every ``self.*`` attribute was
    previously bound to the throwaway local ``lowercase`` and lost. Names
    restored from the call sites (``TFResNetModelTester(self)``,
    ``model_tester.num_stages``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One ResNet stage per entry in `depths`; read by the hidden-states test.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for TF ResNet.

    Restored from the mangled original: the mixin bases were both written
    as the undefined ``_A`` (restored to the mixins imported at the top of
    the file), and every class attribute was bound to the same shadowed
    name ``snake_case__``.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # ResNet has no attention, embeddings or prunable heads.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="""ResNet does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""ResNet does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test.

    Renamed from the mangled ``UpperCAmelCase__`` to match the call inside
    the integration test class.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run a pretrained checkpoint on a real image."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
| 197
|
"""simple docstring"""
def count_inversions_bf(arr):
    """Count inversions in *arr* by brute force in O(n^2).

    An inversion is a pair (i, j) with i < j and arr[i] > arr[j].
    """
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions via merge sort in O(n log n).

    Returns a tuple ``(sorted_arr, num_inversions)``.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge sorted lists *p* and *q*, counting inversions that cross the split.

    Returns ``(merged_sorted_list, num_cross_inversions)``.
    """
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    """Sanity-check both implementations against each other."""
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 33
| 0
|
'''simple docstring'''
# Value/symbol pairs in descending order; greedy conversion relies on this order.
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer.

    A smaller value before a larger one (e.g. "IV") is subtractive.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert a positive integer to its Roman numeral representation."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 104
|
"""simple docstring"""
# Pinned/minimum versions for the package's dependencies.
# Keys are bare package names; values are pip requirement specifiers.
# NOTE(review): presumably consumed by setup.py to build install/extra
# requirements — confirm against the build script.
__A : Any = {
    '''Pillow''': '''Pillow''',
    '''accelerate''': '''accelerate>=0.11.0''',
    '''compel''': '''compel==0.1.8''',
    '''black''': '''black~=23.1''',
    '''datasets''': '''datasets''',
    '''filelock''': '''filelock''',
    '''flax''': '''flax>=0.4.1''',
    '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
    '''huggingface-hub''': '''huggingface-hub>=0.13.2''',
    '''requests-mock''': '''requests-mock==1.10.0''',
    '''importlib_metadata''': '''importlib_metadata''',
    '''invisible-watermark''': '''invisible-watermark''',
    '''isort''': '''isort>=5.5.4''',
    '''jax''': '''jax>=0.2.8,!=0.3.2''',
    '''jaxlib''': '''jaxlib>=0.1.65''',
    '''Jinja2''': '''Jinja2''',
    '''k-diffusion''': '''k-diffusion>=0.0.12''',
    '''torchsde''': '''torchsde''',
    '''note_seq''': '''note_seq''',
    '''librosa''': '''librosa''',
    '''numpy''': '''numpy''',
    '''omegaconf''': '''omegaconf''',
    '''parameterized''': '''parameterized''',
    '''protobuf''': '''protobuf>=3.20.3,<4''',
    '''pytest''': '''pytest''',
    '''pytest-timeout''': '''pytest-timeout''',
    '''pytest-xdist''': '''pytest-xdist''',
    '''ruff''': '''ruff>=0.0.241''',
    '''safetensors''': '''safetensors''',
    '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
    '''scipy''': '''scipy''',
    '''onnx''': '''onnx''',
    '''regex''': '''regex!=2019.12.17''',
    '''requests''': '''requests''',
    '''tensorboard''': '''tensorboard''',
    '''torch''': '''torch>=1.4''',
    '''torchvision''': '''torchvision''',
    '''transformers''': '''transformers>=4.25.1''',
    '''urllib3''': '''urllib3<=2.0.0''',
}
| 33
| 0
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __magic_name__(repo_id: str, filename: str, revision: Optional[str] = None):
    """Build the Hub URL of *filename* inside the dataset repo *repo_id*.

    Fixes the scrambled original: all three parameters were named ``A``
    (a SyntaxError) and the body referenced the undefined ``__snake_case``.

    Args:
        repo_id: dataset repository id on the Hub.
        filename: path of the file inside the repository.
        revision: optional git revision (branch, tag or commit sha).
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        filename = quote(filename)
    return hfh.hf_hub_url(repo_id, filename, repo_type="dataset", revision=revision)
| 107
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import machinery (standard transformers pattern): only the configuration
# is importable eagerly; the torch modeling classes are registered in
# `_import_structure` and loaded on first attribute access via `_LazyModule`.
# Fixes the scrambled original, where both the dict and the model list were
# bound to `__A` and `_import_structure` / `sys.modules[__name__]` were never set.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """Minimal KwargsHandler used by the tests below to exercise `to_kwargs`.

    Fixes the scrambled original: the base class `_A` was undefined, the class
    was not named `MockClass` (which the tests reference), and the three
    fields were unannotated duplicates named `lowercase` — so the dataclass
    had no fields at all.
    """

    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    """Tests for accelerate kwargs handlers.

    Fixes the scrambled original: all methods shared the name `snake_case_`
    (so only the last survived) and several literals had been replaced by the
    undefined name `UpperCamelCase__`.
    """

    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Non-default GradScaler kwargs must be forwarded to the scaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-run this file under torchrun; the __main__ block below does the checks.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under torchrun by `test_ddp_kwargs`: verify DDP kwargs reach the
    # wrapped model. Fixes the scrambled original, where every binding was
    # named `__lowerCamelCase` while later lines read `ddp_scaler`,
    # `accelerator` and `model`.
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 162
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
# For each kind of file, a regex locating the hard-coded version and the
# replacement template ("VERSION" is substituted with the new version string).
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the hard-coded version in `fname` using the regex for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version pinned by `check_min_version` in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (init, setup, examples)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace `main` doc links by stable doc links in the README model list."""
    # Markers delimiting the model list in the README.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the release preparation: bump the version everywhere, clean the README."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """After a release: move everything to the next dev version, clean the README."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 33
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    """Smoke tests for `PyTorchBenchmark`: each scenario benchmarks a tiny model
    and checks that the timing/memory result dicts are populated.

    Fixes the scrambled original: all 14 methods shared the name
    `_lowerCAmelCase` (so unittest discovered none of them) and argument
    values had been replaced by the undefined name `_a` (and `fpaa` ~ `fp16`).
    """

    def check_results_dict_not_empty(self, results):
        # Every (batch_size, sequence_length) combination must have a result.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 291
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowercase(repo_id: str, filename: str, revision: Optional[str] = None):
    """Build the Hub URL of *filename* inside the dataset repo *repo_id*.

    Fixes the scrambled original: all three parameters were named
    ``__snake_case`` — duplicate argument names, a SyntaxError.
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        filename = quote(filename)
    return hfh.hf_hub_url(repo_id, filename, repo_type="dataset", revision=revision)
| 33
| 0
|
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with *n* digits
    (Project Euler problem 25).

    Fixes the scrambled original, in which every binding was named ``a``
    (clobbering itself) while the loop read the undefined names ``fa``, ``f``,
    ``i``, ``index`` and the ``__main__`` guard called the undefined
    ``solution``. The return annotation was also wrong (`str` for an int).

    >>> solution(3)
    12
    """
    f1, f2 = 1, 1
    index = 2
    while True:
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        if len(str(f)) == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 227
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Helper that builds small ResNet configs/inputs and runs shape checks.

    Fixes the scrambled original: every ``__init__`` parameter was named ``A``
    (duplicate argument names — a SyntaxError), the ``self.x = x`` assignments
    had been collapsed into dead locals, and every method shared the name
    ``A``. Renamed from ``_UpperCAmelCase`` because the test class below
    instantiates ``TFResNetModelTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF ResNet.

    Fixes the scrambled original: the class inherited from the undefined base
    `_A` twice (duplicate base classes), every method was named ``A`` (so they
    shadowed each other and unittest discovered nothing), and several
    arguments were the undefined name ``A``.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image used by the slow integration test.

    Renamed from the scrambled `lowercase`: the integration test below calls
    `prepare_img()`, which was otherwise undefined.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: classify the COCO sample image with the published
    TF ResNet checkpoint and compare logits against reference values.

    Fixes the scrambled original: both members were named ``A`` (the second
    shadowed the first) while the body read ``self.default_image_processor``,
    which was never defined.
    """

    @cached_property
    def default_image_processor(self):
        # Only available when vision extras (PIL) are installed.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits against known-good reference values
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 33
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger — the config class below logs through it.
logger = logging.get_logger(__name__)

# Checkpoint -> config-url map. Name follows the transformers convention
# (<MODEL>_PRETRAINED_CONFIG_ARCHIVE_MAP) — TODO confirm against callers;
# previously both statements were bound to the same placeholder name.
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __lowerCamelCase(_A):
    """Configuration for a DETA model (deformable-DETR-style detector).

    NOTE(review): the ``__init__`` signature previously repeated one parameter
    name (a SyntaxError) and every attribute was written to a throwaway local;
    names were restored from the values the body reads and stores.
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ) -> None:
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Alias exposed via ``attribute_map``.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias exposed via ``attribute_map``.
        return self.d_model

    def to_dict(self) -> dict:
        """Serialize to a plain dict, expanding the nested backbone config.

        Previously built nothing and returned an undefined name.
        """
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 320
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Renamed from a dunder placeholder: a ``__``-prefixed module name gets
# name-mangled inside the class body below and could never be referenced there.
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''


class _UpperCAmelCase(unittest.TestCase, _A):
    """Tests for the ``text-question-answering`` tool, local and remote variants.

    NOTE(review): previously all five methods shared one name (shadowing each
    other) and the tools were bound to locals, so ``self.tool`` /
    ``self.remote_tool`` were never set.
    """

    def setUp(self):
        self.tool = load_tool('''text-question-answering''')
        self.tool.setup()
        self.remote_tool = load_tool('''text-question-answering''', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, '''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, '''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')
| 33
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (previously both statements were bound to the same name,
# so the map shadowed the logger).
logger = logging.get_logger(__name__)

# Checkpoint -> config-url map; conventional transformers name — TODO confirm.
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/beit-base-patch16-224-pt22k''': (
        '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowerCAmelCase(_A):
    """Configuration for BEiT models (classification / masked-image-modeling /
    semantic-segmentation heads).

    NOTE(review): the signature previously repeated one parameter name (a
    SyntaxError) and every value was written to a throwaway local instead of
    ``self``; names restored from the stored values.
    """

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],  # kept mutable default for interface compat
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class lowerCAmelCase(_A):
    """ONNX export configuration for BEiT.

    NOTE(review): both properties previously shared one name, so the second
    shadowed the first; restored the names the ONNX exporter reads.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel-values input with dynamic batch/channels/height/width axes.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4
| 141
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _UpperCAmelCase(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that converts Arrow data into (nested) ``torch.Tensor`` values.

    NOTE(review): restored attribute/method names — the previous revision bound
    everything to one placeholder (so ``self.torch_tensor_kwargs`` and
    ``default_dtype`` were undefined) and used garbled dtypes
    (``torch.intaa``/``floataa``) that do not exist.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors; otherwise return as-is."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a tensor, choosing a default dtype."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {'''dtype''': torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'''dtype''': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # user-supplied kwargs override the inferred default dtype
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, '''__array__''') and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Tensorize an arbitrarily nested structure (dicts handled by map_nested)."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 33
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Restored constant names: the tokenizer class below reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# ``logger``; previously all five statements were bound to one shadowing name.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''camembert-base''': 512,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = '''▁'''
class A__(_A):
    """CamemBERT tokenizer backed by SentencePiece, with fairseq-style special
    token alignment.

    NOTE(review): restored attribute, parameter and method names — the previous
    revision repeated one parameter name (a SyntaxError), bound every value to a
    throwaway local, and gave all methods the same name so they shadowed each
    other; names restored from the values each body reads and from the
    ``PreTrainedTokenizer`` interface.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word: include the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Add <s> ... </s> (and </s></s> between pairs) around the token ids."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """CamemBERT does not use token types: everything is segment 0."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        # fairseq specials come on top of the SentencePiece vocabulary.
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens back into text, decoding runs between special tokens."""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into save_directory."""
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 269
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Restored constant names: the tokenizer class below reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# SPIECE_UNDERLINE and ``logger``; previously every statement was bound to the
# same name, each shadowing the last.
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
        '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
        '''xlm-roberta-large-finetuned-conll02-dutch''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
        ),
        '''xlm-roberta-large-finetuned-conll02-spanish''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
        ),
        '''xlm-roberta-large-finetuned-conll03-english''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
        ),
        '''xlm-roberta-large-finetuned-conll03-german''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''xlm-roberta-base''': 512,
    '''xlm-roberta-large''': 512,
    '''xlm-roberta-large-finetuned-conll02-dutch''': 512,
    '''xlm-roberta-large-finetuned-conll02-spanish''': 512,
    '''xlm-roberta-large-finetuned-conll03-english''': 512,
    '''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class A__(_A):
    """XLM-RoBERTa tokenizer backed by SentencePiece, with fairseq id alignment.

    NOTE(review): restored attribute, parameter and method names — the previous
    revision repeated one parameter name (a SyntaxError), bound every value to a
    throwaway local, and gave all methods the same name so they shadowed each
    other; names restored from the values each body reads and from the
    ``PreTrainedTokenizer`` interface.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word: include the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; carry the serialized proto.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add <s> ... </s> (and </s></s> between pairs) around the token ids."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """XLM-RoBERTa does not use token types: everything is segment 0."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Concatenate sub-tokens and turn the ▁ markers back into spaces."""
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or re-serialize) the SentencePiece model into save_directory."""
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 99
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# Restored distinct names: previously every constant was bound to one
# placeholder, each shadowing the last. The doc-constant names follow the
# transformers convention used with add_code_sample_docstrings — TODO confirm.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = '''MobileNetV1Config'''

# Base docstring
_CHECKPOINT_FOR_DOC = '''google/mobilenet_v1_1.0_224'''
_EXPECTED_OUTPUT_SHAPE = [1, 1_024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''google/mobilenet_v1_1.0_224'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''google/mobilenet_v1_1.0_224''',
    '''google/mobilenet_v1_0.75_192''',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF checkpoint variable names to PyTorch parameters.

    NOTE(review): renamed from a placeholder — the loader below calls
    ``_build_tf_to_pytorch_map``. The previous revision assigned every entry to
    one local instead of populating the dict; the TF key names used here follow
    the MobilenetV1 checkpoint layout (weights/BatchNorm/{beta,gamma,
    moving_mean,moving_variance}) — TODO confirm against a real checkpoint.
    """
    tf_to_pt_map = {}

    # NOTE(review): the classification-head class name is assumed to follow this
    # file's naming convention; it is defined later in the module.
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = '''MobilenetV1/Conv2d_0/'''
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(1_3):
        tf_index = i + 1   # TF blocks are 1-based
        pt_index = i * 2   # each TF block maps to a depthwise+pointwise layer pair

        pointer = backbone.layer[pt_index]
        prefix = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoint weights into the PyTorch model in place.

    NOTE(review): renamed from a placeholder — the PreTrainedModel subclass
    below references ``load_tf_weights_in_mobilenet_va``. Locals restored; the
    previous revision never populated ``tf_weights`` nor wrote ``pointer.data``.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.'''
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(F'''Loading TF weight {name} with shape {shape}''')
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(F'''Importing {name}''')
        if name not in tf_weights:
            logger.info(F'''{name} not in tf pre-trained weights, skipping''')
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''')
            # TF depthwise layout (H, W, in, mult) -> PyTorch (in, mult, H, W)
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('''Transposing''')
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''')

        logger.info(F'''Initialize PyTorch weight {name} {array.shape}''')
        pointer.data = torch.from_numpy(array)

        # Drop the variable and its optimizer slots so the leftover report is accurate.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '''/RMSProp''', None)
        tf_weights.pop(name + '''/RMSProp_1''', None)
        tf_weights.pop(name + '''/ExponentialMovingAverage''', None)

    logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''')
    return model
def apply_tf_padding(features: "torch.Tensor", conv_layer) -> "torch.Tensor":
    """Pad ``features`` the way TensorFlow's 'SAME' padding would for ``conv_layer``.

    TF pads asymmetrically (extra pixel goes to the bottom/right); PyTorch convs
    only support symmetric padding, so padding is applied explicitly here.
    Renamed from a placeholder — the conv layer below calls ``apply_tf_padding``.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    # F.pad takes (left, right, top, bottom) for the last two dims.
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, '''constant''', 0.0)
class _UpperCAmelCase ( nn.Module ):
    # MobileNetV1 building block: convolution + optional batch-norm + optional
    # activation.
    # NOTE(review): this file is machine-obfuscated — the duplicate parameter
    # names `A` and the throwaway `lowercase_` assignment targets cannot run
    # as written. The intended attributes (per the upstream MobileNetV1 code)
    # appear to be self.config / self.convolution / self.normalization /
    # self.activation; confirm against the original source before relying on
    # the names below.
    def __init__( self : List[Any] , A : MobileNetVaConfig , A : int , A : int , A : int , A : Optional[int] = 1 , A : Optional[int] = 1 , A : bool = False , A : Optional[bool] = True , A : Optional[bool or str] = True , ) -> None:
        super().__init__()
        lowercase_ : int = config
        # Grouped convolutions require channel counts divisible by `groups`.
        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
        # With TF-style padding the conv itself pads nothing (padding is done
        # in forward via apply_tf_padding); otherwise use symmetric padding.
        lowercase_ : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        lowercase_ : int = nn.Convad(
            in_channels=A , out_channels=A , kernel_size=A , stride=A , padding=A , groups=A , bias=A , padding_mode='''zeros''' , )
        if use_normalization:
            lowercase_ : Optional[Any] = nn.BatchNormad(
                num_features=A , eps=config.layer_norm_eps , momentum=0.9997 , affine=A , track_running_stats=A , )
        else:
            lowercase_ : Union[str, Any] = None
        if use_activation:
            # Activation may be given by name (looked up in the ACT2FN table),
            # fall back to the config's hidden activation otherwise.
            if isinstance(A , A ):
                lowercase_ : str = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , A ):
                lowercase_ : Any = ACTaFN[config.hidden_act]
            else:
                lowercase_ : Tuple = config.hidden_act
        else:
            lowercase_ : Tuple = None
    # Forward pass: optional TF padding -> conv -> batch-norm -> activation.
    def A ( self : str , A : torch.Tensor ) -> torch.Tensor:
        if self.config.tf_padding:
            lowercase_ : List[Any] = apply_tf_padding(A , self.convolution )
        lowercase_ : Optional[int] = self.convolution(A )
        if self.normalization is not None:
            lowercase_ : Union[str, Any] = self.normalization(A )
        if self.activation is not None:
            lowercase_ : Optional[int] = self.activation(A )
        return features
class _UpperCAmelCase ( _A ):
    # Abstract base wiring MobileNetV1 into the PreTrainedModel machinery:
    # config class, TF-checkpoint loader, model prefix, main input name, and
    # gradient-checkpointing support flag (disabled).
    SCREAMING_SNAKE_CASE_ : Optional[int] = MobileNetVaConfig
    SCREAMING_SNAKE_CASE_ : int = load_tf_weights_in_mobilenet_va
    SCREAMING_SNAKE_CASE_ : Optional[Any] = "mobilenet_v1"
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = "pixel_values"
    SCREAMING_SNAKE_CASE_ : List[str] = False
    # Weight init: truncated-normal-style init for linear/conv layers, and the
    # usual (bias=0, weight=1) init for batch-norm layers.
    def A ( self : Any , A : Union[nn.Linear, nn.Convad] ) -> None:
        if isinstance(A , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(A , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
__A : Union[str, Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__A : List[str] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , _A , )
class _UpperCAmelCase ( _A ):
    # The bare MobileNetV1 backbone: a stem conv followed by 13 pairs of
    # (depthwise conv, pointwise conv), with an optional global average pool.
    # NOTE(review): obfuscated `lowercase_` assignment targets hide the real
    # attribute names (self.config / self.conv_stem / self.layer /
    # self.pooler per the upstream code) — confirm against the original.
    def __init__( self : str , A : MobileNetVaConfig , A : bool = True ) -> int:
        super().__init__(A )
        lowercase_ : Union[str, Any] = config
        # Base depth is 32 channels, scaled by the config's depth multiplier.
        lowercase_ : List[str] = 32
        lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
        lowercase_ : Union[str, Any] = MobileNetVaConvLayer(
            A , in_channels=config.num_channels , out_channels=A , kernel_size=3 , stride=2 , )
        # Per-block strides of the 13 depthwise-separable blocks.
        lowercase_ : Optional[Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        lowercase_ : List[Any] = nn.ModuleList()
        for i in range(13 ):
            lowercase_ : Dict = out_channels
            # Channel count doubles at every stride-2 block (and once up front).
            if strides[i] == 2 or i == 0:
                depth *= 2
                lowercase_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
            # Depthwise 3x3 conv (groups == in_channels) ...
            self.layer.append(
                MobileNetVaConvLayer(
                    A , in_channels=A , out_channels=A , kernel_size=3 , stride=strides[i] , groups=A , ) )
            # ... followed by a pointwise 1x1 conv.
            self.layer.append(
                MobileNetVaConvLayer(
                    A , in_channels=A , out_channels=A , kernel_size=1 , ) )
        lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    # Attention-head pruning is not applicable to a pure-conv model.
    def A ( self : Any , A : Optional[Any] ) -> Optional[int]:
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(A )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def A ( self : List[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        # Resolve optional flags against the config defaults.
        lowercase_ : str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )
        lowercase_ : List[str] = self.conv_stem(A )
        lowercase_ : Dict = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            lowercase_ : Optional[int] = layer_module(A )
            if output_hidden_states:
                lowercase_ : str = all_hidden_states + (hidden_states,)
        lowercase_ : Tuple = hidden_states
        if self.pooler is not None:
            # Global average pool -> flatten to (batch, channels).
            lowercase_ : Dict = torch.flatten(self.pooler(A ) , start_dim=1 )
        else:
            lowercase_ : Optional[Any] = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=A , pooler_output=A , hidden_states=A , )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , _A , )
class _UpperCAmelCase ( _A ):
    # MobileNetV1 backbone + dropout + linear classification head.
    # NOTE(review): obfuscated assignment targets hide the real attribute
    # names (self.num_labels / self.mobilenet_v1 / self.dropout /
    # self.classifier per the upstream code) — confirm against the original.
    def __init__( self : List[str] , A : MobileNetVaConfig ) -> None:
        super().__init__(A )
        lowercase_ : int = config.num_labels
        lowercase_ : List[str] = MobileNetVaModel(A )
        # Head width follows the last pointwise conv of the backbone.
        lowercase_ : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        lowercase_ : Tuple = nn.Dropout(config.classifier_dropout_prob , inplace=A )
        lowercase_ : int = nn.Linear(A , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(A )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def A ( self : Optional[Any] , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , A : Optional[torch.Tensor] = None , A : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        lowercase_ : List[Any] = self.mobilenet_va(A , output_hidden_states=A , return_dict=A )
        lowercase_ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
        lowercase_ : Dict = self.classifier(self.dropout(A ) )
        lowercase_ : int = None
        if labels is not None:
            # Infer the problem type once (standard HF classification-head
            # logic): 1 label -> regression; integer labels -> single-label
            # classification; otherwise multi-label classification.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    lowercase_ : List[str] = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    lowercase_ : Optional[Any] = '''single_label_classification'''
                else:
                    lowercase_ : Tuple = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                lowercase_ : str = MSELoss()
                if self.num_labels == 1:
                    lowercase_ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    lowercase_ : List[str] = loss_fct(A , A )
            elif self.config.problem_type == "single_label_classification":
                lowercase_ : List[Any] = CrossEntropyLoss()
                lowercase_ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                lowercase_ : str = BCEWithLogitsLoss()
                lowercase_ : List[Any] = loss_fct(A , A )
        if not return_dict:
            lowercase_ : Tuple = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=A , logits=A , hidden_states=outputs.hidden_states , )
| 33
| 0
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class A__ ( _A ):
    """Fast tokenizer whose slow counterpart is :class:`CustomTokenizer`.

    Registering the slow class on the (obfuscated) class attribute lets the
    fast tokenizer convert from / fall back to the slow implementation.
    """

    # Slow-tokenizer class used for conversion (attribute name kept as-is).
    lowerCAmelCase__ : Optional[int] = CustomTokenizer
| 325
|
"""simple docstring"""
def lowercase(numbers: list) -> list:
    """Sort ``numbers`` in place with exchange sort and return the list.

    Exchange sort compares every pair (i, j) with j > i and swaps when out of
    order — O(n^2), fine for small teaching examples.

    Args:
        numbers: List of comparable items; mutated in place.

    Returns:
        The same list, sorted ascending.
    """
    # The obfuscated original assigned len() and the swap results to throwaway
    # names (so it iterated range(<list>) and never actually swapped); this
    # restores the working algorithm.
    length = len(numbers)
    for i in range(length):
        for j in range(i + 1, length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted_values = [int(item) for item in user_input.split(",")]
    # Original called the pre-obfuscation name `exchange_sort`, which no
    # longer exists in this file.
    print(lowercase(unsorted_values))
| 33
| 0
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def UpperCAmelCase__(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line, padding/truncating to ``max_length``.

    Args:
        tokenizer: A HF tokenizer (callable on a list of strings).
        line: The raw text line to encode.
        max_length: Maximum sequence length.
        padding_side: "left" or "right"; applied to the tokenizer before encoding.
        pad_to_max_length: Pad to ``max_length`` when True, no padding otherwise.
        return_tensors: Framework for the returned tensors (default "pt").

    Returns:
        The tokenizer's BatchEncoding for ``[line]``.
    """
    # The obfuscated original had duplicate parameter names (a SyntaxError)
    # and never applied `padding_side`; this restores the upstream logic.
    # BART tokenizers need add_prefix_space for lines not starting with " ".
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def UpperCAmelCase__(input_ids, pad_token_id, attention_mask=None):
    """Drop columns that contain only padding in every row of the batch.

    Args:
        input_ids: 2-D tensor (batch, seq_len) of token ids.
        pad_token_id: Id used for padding.
        attention_mask: Optional mask trimmed with the same columns.

    Returns:
        Trimmed ``input_ids``, or ``(input_ids, attention_mask)`` when a mask
        is given.
    """
    # The obfuscated original had duplicate parameter names (a SyntaxError);
    # this restores the upstream column-trimming logic.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _A ( _A ):
    # Seq2seq line-based dataset: reads parallel <type_path>.source /
    # <type_path>.target files lazily via linecache and collates batches for
    # RAG / BART / T5 training.
    # NOTE(review): this file is machine-obfuscated — `lowercase = ...`
    # assignments were presumably `self.<attr> = ...` upstream, and the class
    # inherits from its own (undefined) name; the comments describe apparent
    # intent — confirm against the original RAG `utils_rag.py`.
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="train" , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="" , ):
        """Locate the source/target files and cache per-line lengths."""
        super().__init__()
        lowercase = Path(__lowerCAmelCase ).joinpath(type_path + """.source""" )
        lowercase = Path(__lowerCAmelCase ).joinpath(type_path + """.target""" )
        lowercase = self.get_char_lens(self.src_file )
        lowercase = max_source_length
        lowercase = max_target_length
        assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
        lowercase = tokenizer
        lowercase = prefix
        # Optionally restrict the dataset to the first n_obs examples.
        if n_obs is not None:
            lowercase = self.src_lens[:n_obs]
        lowercase = src_lang
        lowercase = tgt_lang
    def __len__( self ):
        """Number of (source, target) line pairs."""
        return len(self.src_lens )
    def __getitem__( self , __lowerCAmelCase ):
        """Encode one example; returns input ids, attention mask and target ids."""
        lowercase = index + 1 # linecache starts at 1
        lowercase = self.prefix + linecache.getline(str(self.src_file ) , __lowerCAmelCase ).rstrip("""\n""" )
        lowercase = linecache.getline(str(self.tgt_file ) , __lowerCAmelCase ).rstrip("""\n""" )
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , __lowerCAmelCase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers carry separate question-encoder / generator parts.
        lowercase = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        )
        lowercase = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        lowercase = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_source_length , """right""" )
        lowercase = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_target_length , """right""" )
        lowercase = source_inputs['''input_ids'''].squeeze()
        lowercase = target_inputs['''input_ids'''].squeeze()
        lowercase = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def A__ ( __lowerCAmelCase ):
        """Character length of every line in the file (used for filtering)."""
        return [len(__lowerCAmelCase ) for x in Path(__lowerCAmelCase ).open().readlines()]
    # NOTE(review): this second `A__` (the collate_fn) shadows the static one
    # above under obfuscation; upstream they had distinct names.
    def A__ ( self , __lowerCAmelCase ):
        """Collate a list of examples into padded, pad-trimmed batch tensors."""
        lowercase = torch.stack([x["""input_ids"""] for x in batch] )
        lowercase = torch.stack([x["""attention_mask"""] for x in batch] )
        lowercase = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        lowercase = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        lowercase = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        # Trim columns that are padding in every example of the batch.
        lowercase = trim_batch(__lowerCAmelCase , __lowerCAmelCase )
        lowercase = trim_batch(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase )
        lowercase = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
# Module-level logger used by the helpers below.
__lowerCAmelCase : Tuple =getLogger(__name__)
def UpperCAmelCase__(lists):
    """Flatten one level of nesting: ``[[a, b], [c]] -> [a, b, c]``.

    The obfuscated original referenced an undefined name in the body; this
    restores the parameter/usage pairing.
    """
    return list(itertools.chain.from_iterable(lists))
def UpperCAmelCase__(folder_path):
    """Write the current git repo info to ``folder_path``/git_log.json.

    Relies on the sibling helpers ``get_git_info`` and ``save_json``; the
    obfuscated original referenced an undefined parameter name.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def UpperCAmelCase__(content, path, indent=4, **json_dump_kwargs):
    """Serialize ``content`` to ``path`` as indented JSON.

    Args:
        content: Any JSON-serializable object.
        path: Destination file path.
        indent: Indentation passed to ``json.dump`` (default 4).
        **json_dump_kwargs: Extra keyword arguments for ``json.dump``.
    """
    # The obfuscated original passed undefined names to json.dump; this
    # restores the parameter wiring.
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def UpperCAmelCase__(path):
    """Read and return the JSON content of ``path``.

    The obfuscated original opened an undefined name; this restores the
    parameter wiring.
    """
    with open(path) as f:
        return json.load(f)
def UpperCAmelCase__():
    """Return repo id, commit sha, branch and hostname for experiment logging.

    Requires the ``git`` package (GitPython) imported at module level; the
    obfuscated original passed/str'ed undefined names.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def UpperCAmelCase__(f, x):
    """Eager map: ``list(map(f, x))`` (the "lmap" helper)."""
    return list(map(f, x))
def UpperCAmelCase__(obj, path):
    """Pickle ``obj`` to ``path`` (binary mode).

    The obfuscated original dumped undefined names; this restores the
    parameter wiring.
    """
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def UpperCAmelCase__(s):
    """Normalize an answer string SQuAD-style.

    Lowercase, strip punctuation, drop the articles a/an/the, and collapse
    whitespace — the canonical pre-processing for EM/F1 QA metrics.
    """

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def UpperCAmelCase__(prediction, ground_truth):
    """Token-level F1 between ``prediction`` and ``ground_truth`` (SQuAD metric).

    Both strings are normalized via the sibling ``normalize_answer`` helper,
    then precision/recall are computed over the token multiset overlap.
    Returns 0 when there is no overlap.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    return (2 * precision * recall) / (precision + recall)
def UpperCAmelCase__(prediction, ground_truth):
    """Exact-match after SQuAD-style normalization (sibling ``normalize_answer``)."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def UpperCAmelCase__(output_lns, reference_lns):
    """Average exact-match over paired prediction/reference lines.

    Returns ``{"em": <mean exact-match in [0, 1]>}``; 0 for empty input.
    Raises AssertionError on length mismatch (kept from the original so
    existing callers catch the same exception type).
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    # Relies on the sibling `exact_match_score` helper.
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def UpperCAmelCase__(model_prefix):
    """Return True when the model-type string names a RAG model ("rag*")."""
    return model_prefix.startswith("rag")
def UpperCAmelCase__(extra_params, hparams, config):
    """Move ``extra_params`` attributes from ``hparams`` onto ``config``.

    For each name in ``extra_params`` that is truthy on ``hparams``: set it on
    ``config`` under the same name, or under its T5 equivalent (``dropout`` ->
    ``dropout_rate``); names the config does not know are logged and dropped.
    Moved/dropped attributes are deleted from ``hparams``.

    Returns:
        The (mutated) ``(hparams, config)`` pair.
    """
    # The obfuscated original assigned everything to one throwaway name,
    # losing the equivalence table; this restores the upstream logic.
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 197
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
    # Test-helper holding YOLOS image-processor hyper-parameters and computing
    # the resize shapes the processor is expected to produce.
    # NOTE(review): identifiers are machine-obfuscated (duplicate parameter
    # names `A`, throwaway `lowercase_` targets — presumably `self.<attr>`
    # upstream); confirm against the original YOLOS image-processing tests.
    def __init__( self : List[Any] , A : Any , A : Tuple=7 , A : Tuple=3 , A : Optional[Any]=30 , A : List[Any]=4_00 , A : Tuple=True , A : Dict=None , A : List[str]=True , A : Optional[int]=[0.5, 0.5, 0.5] , A : Tuple=[0.5, 0.5, 0.5] , A : List[str]=True , A : List[Any]=1 / 2_55 , A : Union[str, Any]=True , ) -> Tuple:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        lowercase_ : Optional[int] = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
        lowercase_ : Optional[int] = parent
        lowercase_ : str = batch_size
        lowercase_ : Tuple = num_channels
        lowercase_ : str = min_resolution
        lowercase_ : Any = max_resolution
        lowercase_ : str = do_resize
        lowercase_ : Any = size
        lowercase_ : Optional[int] = do_normalize
        lowercase_ : List[str] = image_mean
        lowercase_ : Optional[Any] = image_std
        lowercase_ : int = do_rescale
        lowercase_ : List[str] = rescale_factor
        lowercase_ : int = do_pad
    # Kwargs dict used to instantiate the image processor under test.
    def A ( self : Any ) -> str:
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    # Expected (height, width) after shortest-edge resizing; for batched
    # inputs, the per-image maxima (processors pad batches to the max size).
    def A ( self : Optional[Any] , A : int , A : int=False ) -> Tuple:
        if not batched:
            lowercase_ : Optional[int] = image_inputs[0]
            if isinstance(A , Image.Image ):
                lowercase_ , lowercase_ : int = image.size
            else:
                lowercase_ , lowercase_ : Tuple = image.shape[1], image.shape[2]
            # Scale so the shortest edge hits size["shortest_edge"], keeping
            # the aspect ratio.
            if w < h:
                lowercase_ : int = int(self.size['''shortest_edge'''] * h / w )
                lowercase_ : Optional[Any] = self.size['''shortest_edge''']
            elif w > h:
                lowercase_ : Optional[Any] = self.size['''shortest_edge''']
                lowercase_ : Optional[int] = int(self.size['''shortest_edge'''] * w / h )
            else:
                lowercase_ : Any = self.size['''shortest_edge''']
                lowercase_ : Any = self.size['''shortest_edge''']
        else:
            lowercase_ : Tuple = []
            for image in image_inputs:
                lowercase_ , lowercase_ : Optional[Any] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            lowercase_ : Union[str, Any] = max(A , key=lambda A : item[0] )[0]
            lowercase_ : Optional[Any] = max(A , key=lambda A : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _UpperCAmelCase ( _A , unittest.TestCase ):
    # Test suite for the YOLOS image processor: property presence, dict
    # round-trip, PIL/numpy/torch inputs, batch padding equivalence, and slow
    # integration checks against COCO detection/panoptic annotations.
    # NOTE(review): identifiers are machine-obfuscated; e.g. `setUp` and the
    # tester-class name were renamed, so several references below
    # (YolosImageProcessingTester) no longer resolve as written.
    SCREAMING_SNAKE_CASE_ : Optional[Any] = YolosImageProcessor if is_vision_available() else None
    # setUp: build the hyper-parameter helper.
    def A ( self : Optional[int] ) -> Optional[int]:
        lowercase_ : Optional[Any] = YolosImageProcessingTester(self )
    @property
    def A ( self : str ) -> Any:
        return self.image_processor_tester.prepare_image_processor_dict()
    # The processor exposes all expected configuration attributes.
    def A ( self : Optional[int] ) -> List[str]:
        lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A , '''image_mean''' ) )
        self.assertTrue(hasattr(A , '''image_std''' ) )
        self.assertTrue(hasattr(A , '''do_normalize''' ) )
        self.assertTrue(hasattr(A , '''do_resize''' ) )
        self.assertTrue(hasattr(A , '''size''' ) )
    # from_dict honors defaults and keyword overrides (incl. legacy max_size).
    def A ( self : Dict ) -> Tuple:
        lowercase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
        self.assertEqual(image_processor.do_pad , A )
        lowercase_ : Tuple = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , A )
    def A ( self : Optional[int] ) -> Tuple:
        pass
    # PIL inputs: single image and batch produce expected resized shapes.
    def A ( self : Tuple ) -> int:
        # Initialize image_processing
        lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
        for image in image_inputs:
            self.assertIsInstance(A , Image.Image )
        # Test not batched input
        lowercase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
        lowercase_ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # numpy inputs: same shape expectations as PIL.
    def A ( self : str ) -> Any:
        # Initialize image_processing
        lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
        for image in image_inputs:
            self.assertIsInstance(A , np.ndarray )
        # Test not batched input
        lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        lowercase_ , lowercase_ : int = self.image_processor_tester.get_expected_values(A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase_ : Optional[int] = image_processing(A , return_tensors='''pt''' ).pixel_values
        lowercase_ , lowercase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # torch-tensor inputs: same shape expectations again.
    def A ( self : Tuple ) -> Optional[int]:
        # Initialize image_processing
        lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A , torch.Tensor )
        # Test not batched input
        lowercase_ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase_ : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
        lowercase_ , lowercase_ : List[str] = self.image_processor_tester.get_expected_values(A , batched=A )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # Calling `.pad(...)` directly must match calling the processor itself.
    def A ( self : Tuple ) -> Optional[Any]:
        # Initialize image_processings
        lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        lowercase_ : Tuple = self.image_processing_class(do_resize=A , do_normalize=A , do_rescale=A )
        # create random PyTorch tensors
        lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A , torch.Tensor )
        # Test whether the method "pad" and calling the image processor return the same tensors
        lowercase_ : Union[str, Any] = image_processing_a.pad(A , return_tensors='''pt''' )
        lowercase_ : List[Any] = image_processing_a(A , return_tensors='''pt''' )
        self.assertTrue(
            torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
    # Slow integration test: COCO detection annotations round-trip through
    # the pretrained processor with known pixel/label values.
    @slow
    def A ( self : str ) -> List[Any]:
        # prepare image and target
        lowercase_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            lowercase_ : List[Any] = json.loads(f.read() )
        lowercase_ : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
        # encode them
        lowercase_ : Union[str, Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
        lowercase_ : List[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
        # verify pixel values
        lowercase_ : Union[str, Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , A )
        lowercase_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
        # verify area
        lowercase_ : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
        # verify boxes
        lowercase_ : List[str] = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
        lowercase_ : Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
        # verify image_id
        lowercase_ : List[Any] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
        # verify is_crowd
        lowercase_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
        # verify class_labels
        lowercase_ : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
        # verify orig_size
        lowercase_ : List[str] = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
        # verify size
        lowercase_ : Optional[Any] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
    # Slow integration test: COCO panoptic annotations (with masks).
    @slow
    def A ( self : List[Any] ) -> Dict:
        # prepare image, target and masks_path
        lowercase_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            lowercase_ : str = json.loads(f.read() )
        lowercase_ : int = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
        lowercase_ : List[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        lowercase_ : int = YolosImageProcessor(format='''coco_panoptic''' )
        lowercase_ : Any = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
        # verify pixel values
        lowercase_ : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , A )
        lowercase_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
        # verify area
        lowercase_ : List[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
        # verify boxes
        lowercase_ : str = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
        lowercase_ : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
        # verify image_id
        lowercase_ : List[str] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
        # verify is_crowd
        lowercase_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
        # verify class_labels
        lowercase_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
        # verify masks
        lowercase_ : Dict = 82_28_73
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
        # verify orig_size
        lowercase_ : Tuple = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
        # verify size
        lowercase_ : List[str] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 33
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.