| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87 to 55.2k chars) | int64 (0 to 349) | string (135 to 49.1k chars) | int64 (0 to 349) | int64 (0 to 1) |
'''simple docstring'''
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu', 'ms_deform_attn_cpu.cpp'),
            os.path.join('cuda', 'ms_deform_attn_cuda.cu'),
        ]
    ]

    load(
        'MultiScaleDeformableAttention',
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=['-DWITH_CUDA=1'],
        extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
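JIT-compiling the extension is expensive, so callers usually compile once and reuse the module. A minimal sketch of that caching pattern, assuming the `load_cuda_kernels` helper above (the wrapper name and cache variable are illustrative, not part of the original file):

```python
# Illustrative caching wrapper; _MSDA and get_cuda_kernels are assumptions.
_MSDA = None


def get_cuda_kernels():
    global _MSDA
    if _MSDA is None:
        # First call triggers the JIT build; later calls reuse the module.
        _MSDA = load_cuda_kernels()
    return _MSDA
```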
| code_codestyle: 151 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
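`_LazyModule` swaps itself in for the real module so the torch-backed submodules are only imported when an attribute is first touched. The same mechanism can be sketched with a module-level `__getattr__` (PEP 562); the mapping below is a placeholder, not the real one:

```python
# Minimal sketch of lazy imports via PEP 562 module-level __getattr__.
# The _LAZY mapping here is illustrative only.
import importlib

_LAZY = {'configuration_timesformer': ['TimesformerConfig']}


def __getattr__(name):
    for submodule, attrs in _LAZY.items():
        if name in attrs:
            module = importlib.import_module(f'.{submodule}', __name__)
            return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
```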
| style_context_codestyle: 53 | label: 0 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=[30, 30] , _A=2 , _A=3 , _A=True , _A=True , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=10 , _A=0.02 , _A=3 , _A=None , _A=8 , _A=10 , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = n_targets
SCREAMING_SNAKE_CASE_ = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
SCREAMING_SNAKE_CASE_ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
SCREAMING_SNAKE_CASE_ = num_patches + 1 + self.num_detection_tokens
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
SCREAMING_SNAKE_CASE_ = []
for i in range(self.batch_size ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__A )
SCREAMING_SNAKE_CASE_ = torch.rand(self.n_targets , 4 , device=__A )
labels.append(__A )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ) -> Tuple:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCamelCase ( self , _A , _A , _A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = YolosModel(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCamelCase ( self , _A , _A , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = YolosForObjectDetection(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE_ = model(pixel_values=__A )
SCREAMING_SNAKE_CASE_ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
SCREAMING_SNAKE_CASE_ = model(pixel_values=__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCAmelCase_ =(YolosModel, YolosForObjectDetection) if is_torch_available() else ()
UpperCAmelCase_ =(
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
UpperCAmelCase_ =False
UpperCAmelCase_ =False
UpperCAmelCase_ =False
UpperCAmelCase_ =False
def _UpperCamelCase ( self , _A , _A , _A=False ) -> Any:
SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
SCREAMING_SNAKE_CASE_ = []
for i in range(self.model_tester.batch_size ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = torch.ones(
size=(self.model_tester.n_targets,) , device=__A , dtype=torch.long )
SCREAMING_SNAKE_CASE_ = torch.ones(
self.model_tester.n_targets , 4 , device=__A , dtype=torch.float )
labels.append(__A )
SCREAMING_SNAKE_CASE_ = labels
return inputs_dict
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = YolosModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def _UpperCamelCase ( self ) -> int:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Tuple:
# YOLOS does not use inputs_embeds
pass
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__A )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
# in YOLOS, the seq_len is different
SCREAMING_SNAKE_CASE_ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
SCREAMING_SNAKE_CASE_ = len(__A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE_ = 1
self.assertEqual(out_len + added_hidden_states , len(__A ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCamelCase ( self ) -> Optional[int]:
def check_hidden_states_output(_A , _A , _A ):
SCREAMING_SNAKE_CASE_ = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE_ = outputs.hidden_states
SCREAMING_SNAKE_CASE_ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__A ) , __A )
# YOLOS has a different seq_length
SCREAMING_SNAKE_CASE_ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(__A , __A , __A )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__A )
@slow
def _UpperCamelCase ( self ) -> str:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = YolosModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def _UpperCamelCase ( self ) -> Tuple:
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__A )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(inputs.pixel_values )
# verify outputs
SCREAMING_SNAKE_CASE_ = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __A )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=__A , )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __A , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __A , atol=1E-4 ) )
# verify postprocessing
SCREAMING_SNAKE_CASE_ = image_processor.post_process_object_detection(
__A , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
SCREAMING_SNAKE_CASE_ = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(__A )
SCREAMING_SNAKE_CASE_ = [75, 75, 17, 63, 17]
SCREAMING_SNAKE_CASE_ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(__A )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __A , atol=1E-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __A )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __A ) )
| code_codestyle: 299 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.')
    return _value
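`strtobool` comes from `distutils.util`, which is deprecated and removed from the standard library in Python 3.12, so a local replacement is the safer long-term choice; a minimal sketch with the same accepted spellings:

```python
# Drop-in replacement for distutils.util.strtobool (removed in Python 3.12).
def strtobool_compat(val: str) -> int:
    val = val.lower()
    if val in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    if val in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    raise ValueError(f'invalid truth value {val!r}')
```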
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.0: \'pip install "soundfile>=0.12.0"\'; ',
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def require_faiss(test_case):
    """simple docstring"""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss')(test_case)
    return test_case


def require_regex(test_case):
    """simple docstring"""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex')(test_case)
    return test_case


def require_elasticsearch(test_case):
    """simple docstring"""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch')(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """simple docstring"""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy')(test_case)
    return test_case


def require_torch(test_case):
    """simple docstring"""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch')(test_case)
    return test_case


def require_tf(test_case):
    """simple docstring"""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow')(test_case)
    return test_case


def require_jax(test_case):
    """simple docstring"""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX')(test_case)
    return test_case


def require_pil(test_case):
    """simple docstring"""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow')(test_case)
    return test_case
def require_transformers(test_case):
    """simple docstring"""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """simple docstring"""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """simple docstring"""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """simple docstring"""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy')(test_case)
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """simple docstring"""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """simple docstring"""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(test_case)
    else:
        return test_case
def slow(test_case):
    """simple docstring"""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow')(test_case)
    return test_case


def local(test_case):
    """simple docstring"""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local')(test_case)
    return test_case


def packaged(test_case):
    """simple docstring"""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged')(test_case)
    return test_case


def remote(test_case):
    """simple docstring"""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote')(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """simple docstring"""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test'):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """simple docstring"""

    pass


class OfflineSimulationMode(Enum):
    """simple docstring"""

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """simple docstring"""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.')
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f'OfflineMock[{url}]'),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
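Typical usage wraps a network call and asserts that it fails fast instead of hanging; a minimal sketch using the context manager above:

```python
# Illustrative test built on the offline() context manager defined above.
import pytest
import requests


def test_fails_without_network():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.Session().get('https://huggingface.co')
```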
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """simple docstring"""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    """simple docstring"""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """simple docstring"""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
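These two context managers turn Arrow's default memory-pool counter into an explicit test assertion; a minimal sketch of how a test might use them:

```python
# Illustrative use of the Arrow-memory assertions above.
import pyarrow as pa


def test_table_allocates_arrow_memory():
    with assert_arrow_memory_increases():
        # Building a table allocates buffers in Arrow's default memory pool.
        table = pa.table({'col': list(range(1_000))})
    del table
```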
def is_rng_equal(rng1, rng2):
    """simple docstring"""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    """simple docstring"""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    """simple docstring"""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """simple docstring"""
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=''):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:')),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f'The combined stderr from workers follows:\n{stderr}')

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
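`execute_subprocess_async` is what tests call to run a script end-to-end while streaming and capturing its output; a minimal sketch of a call (the script name and arguments are placeholders):

```python
# Illustrative call; 'some_script.py' and its arguments are assumptions.
import os
import sys

cmd = [sys.executable, 'some_script.py', '--num_workers', '2']
result = execute_subprocess_async(cmd, env=os.environ.copy(), timeout=180)
print(result.returncode, result.stdout[:3])
```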
def pytest_xdist_worker_id():
    """simple docstring"""
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(r'^gw', '', worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """simple docstring"""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| style_context_codestyle: 53 | label: 0 |
'''simple docstring'''
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
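The `visited` list makes `has_loop` cost O(n) extra memory and O(n^2) time because of the membership scan. Floyd's tortoise-and-hare detection answers the same question in O(1) memory; a minimal sketch against the `Node` class above:

```python
def has_loop_floyd(head: Node | None) -> bool:
    # `fast` advances two nodes per step, `slow` one; they can only meet
    # again if the chain of next_node pointers cycles back on itself.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False
```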
| code_codestyle: 297 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow log noise
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| style_context_codestyle: 53 | label: 0 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """
    ]
    tgt = [
        """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
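The `newline_sep` behavior these tests probe comes from `rougeLsum` in Google's `rouge_score` package, which only treats text as multi-sentence when sentences are separated by newlines; a minimal sketch showing the effect directly (the example sentences are made up):

```python
# Direct illustration with the rouge_score package: rougeLsum scores change
# once sentences are newline-separated, since it takes an LCS per sentence.
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(['rougeLsum'])
target = 'The cat sat on the mat. The dog barked at the cat.'
prediction = 'The dog barked at the cat. A cat sat on a mat.'

flat = scorer.score(target, prediction)['rougeLsum'].fmeasure
split = scorer.score(
    target.replace('. ', '.\n'), prediction.replace('. ', '.\n')
)['rougeLsum'].fmeasure
print(flat, split)  # the two scores generally differ
```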
| code_codestyle: 44 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """simple docstring"""
    return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name='attention'):
    """simple docstring"""
    k_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """simple docstring"""
    return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables['target'])
    old = {'/'.join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/encoder/mlp/wi_0/kernel' in old
    print('Split MLP:', split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, 'encoder', 'pre_attention_layer_norm')
        k, o, q, v = t5x_attention_lookup(old, i, 'encoder', 'attention')
        new[f'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[f'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, 'encoder', 'pre_mlp_layer_norm')
        wi, wo = t5x_mlp_lookup(old, i, 'encoder', split_mlp_wi)
        new[f'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[f'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f'encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = t5x_relpos_bias_lookup(
                old, i, 'encoder').T

    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']

    if not scalable_attention:
        new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = t5x_relpos_bias_lookup(
            old, 0, 'encoder').T
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = t5x_relpos_bias_lookup(
            old, 0, 'decoder').T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_self_attention_layer_norm')
            k, o, q, v = t5x_attention_lookup(old, i, 'decoder', 'self_attention')
            new[f'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_cross_attention_layer_norm')
            k, o, q, v = t5x_attention_lookup(old, i, 'decoder', 'encoder_decoder_attention')
            new[f'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_mlp_layer_norm')
            wi, wo = t5x_mlp_lookup(old, i, 'decoder', split_mlp_wi)
            new[f'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[f'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f'decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = t5x_relpos_bias_lookup(
                    old, i, 'decoder').T

        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if 'decoder/logits_dense/kernel' in old:
            new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if 'encoder.embed_tokens.weight' not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']

    if not is_encoder_only:
        if 'decoder.embed_tokens.weight' not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']
        if 'lm_head.weight' not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            state_dict['lm_head.weight'] = state_dict['shared.weight']

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """simple docstring"""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """simple docstring"""
    config = MT5Config.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
args = parser.parse_args()
convert_t5x_checkpoint_to_pytorch(
    args.t5x_checkpoint_path,
    args.config_file,
    args.pytorch_dump_path,
    args.is_encoder_only,
    args.scalable_attention,
)
| style_context_codestyle: 53 | label: 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| code_codestyle: 109 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
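A short usage sketch of the processor (the checkpoint id `Salesforce/blip-image-captioning-base` is a commonly published one and is an assumption here, as is the image path):

```python
# Illustrative usage; checkpoint id and image path are assumptions.
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base')
image = Image.open('photo.jpg')
inputs = processor(images=image, text='a photography of', return_tensors='pt')
print(list(inputs.keys()))  # pixel_values plus input_ids / attention_mask
```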
| style_context_codestyle: 53 | label: 0 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
def __init__( self: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict=13 , UpperCamelCase_: Optional[Any]=64 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: str=[16, 48, 96] , UpperCamelCase_: List[Any]=[1, 3, 6] , UpperCamelCase_: Tuple=[1, 2, 10] , UpperCamelCase_: str=[7, 3, 3] , UpperCamelCase_: Optional[int]=[4, 2, 2] , UpperCamelCase_: Any=[2, 1, 1] , UpperCamelCase_: List[str]=[2, 2, 2] , UpperCamelCase_: List[Any]=[False, False, True] , UpperCamelCase_: Dict=[0.0, 0.0, 0.0] , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: int=1E-12 , UpperCamelCase_: int=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: List[Any]=2 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_sizes
__lowerCamelCase = patch_stride
__lowerCamelCase = patch_padding
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = num_labels
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = num_heads
__lowerCamelCase = stride_kv
__lowerCamelCase = depth
__lowerCamelCase = cls_token
__lowerCamelCase = attention_drop_rate
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
# create a random int32 tensor of given shape
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: str ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = TFCvtModel(config=__A )
__lowerCamelCase = model(__A , training=__A )
__lowerCamelCase = (self.image_size, self.image_size)
__lowerCamelCase, __lowerCamelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowerCamelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowerCamelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFCvtForImageClassification(__A )
__lowerCamelCase = model(__A , labels=__A , training=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
UpperCAmelCase__ : int = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : int = False
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = TFCvtModelTester(self )
__lowerCamelCase = TFCvtConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
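
# A minimal standalone sketch (my addition, not part of the test suite above) showing
# how the expected spatial size in `create_and_check_model` is derived: each Cvt stage
# is a strided convolution, so the output size follows the usual conv arithmetic
# floor((H + 2*pad - kernel) / stride) + 1, applied once per stage.
from math import floor as _floor

def expected_cvt_spatial_size(image_size, patch_sizes, patch_stride, patch_padding):
    height = width = image_size
    for k, s, p in zip(patch_sizes, patch_stride, patch_padding):
        height = _floor((height + 2 * p - k) / s + 1)
        width = _floor((width + 2 * p - k) / s + 1)
    return height, width

# With the tester defaults (64px input, kernels [7, 3, 3], strides [4, 2, 2],
# paddings [2, 1, 1]) this yields a 4x4 final feature map.
assert expected_cvt_spatial_size(64, [7, 3, 3], [4, 2, 2], [2, 1, 1]) == (4, 4)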
| 12
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """A loop exists when the same node is visited twice while traversing the list."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
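
# As a point of comparison (my addition, not part of the original file): the
# visited-list approach above needs O(n) extra memory. Floyd's tortoise-and-hare
# detects the same loops in O(1) memory by advancing two pointers at different
# speeds; they can only meet if the list cycles.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast and fast.next_node:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False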
| 53
| 0
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
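
# A small aside (my addition, not in the original file): merge_lists re-sorts the
# concatenation inside the SortedLinkedList constructor, which costs
# O((m + n) log(m + n)). Because both inputs are already sorted, the merged
# *sequence* itself can be produced in O(m + n) with heapq.merge:
import heapq

def merged_values(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> list[int]:
    # heapq.merge lazily interleaves already-sorted iterables in linear time.
    return list(heapq.merge(sll_one, sll_two))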
| 182
|
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    Returns True if the pattern occurs in the text, using a rolling hash so that
    each window of the text is compared against the pattern hash in O(1).
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
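
# Worked example (my addition): the rolling hash treats a window as a base-256
# number mod 1_000_003. Sliding "ab" -> "bc" drops ord('a') * 256 (the high digit),
# shifts by the base, and appends ord('c'):
h_ab = (ord("a") * 256 + ord("b")) % modulus
h_bc = ((h_ab - ord("a") * 256) * 256 + ord("c")) % modulus
assert h_bc == (ord("b") * 256 + ord("c")) % modulus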
| 53
| 0
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)

            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
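
# A minimal standalone sketch (my addition, assuming faiss is installed) of the
# pattern the fixtures above rely on: a `datasets.Dataset` with an "embeddings"
# column gains nearest-neighbour lookup via a FAISS index, which is exactly what
# RagRetriever queries under the hood.
def build_toy_index(vector_size: int = 8) -> Dataset:
    ds = Dataset.from_dict(
        {
            "id": ["0", "1"],
            "text": ["foo", "bar"],
            "embeddings": [np.ones(vector_size), 2 * np.ones(vector_size)],
        }
    )
    ds.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
    # single nearest neighbour of an all-ones query (highest inner product wins)
    scores, examples = ds.get_nearest_examples("embeddings", np.ones(vector_size, dtype=np.float32), k=1)
    return ds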
| 62
|
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
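
    # Quick usage sketch (my addition) exercising the class above on a 2x2 matrix.
    # det([[1, 2], [3, 4]]) = 1*4 - 2*3 = -2, so the matrix is invertable.
    m = Matrix([[1, 2], [3, 4]])
    assert m.determinant() == -2
    assert m.is_invertable()
    assert m * m.identity() == m  # multiplying by the identity is a no-op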
| 53
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63
|
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    # blank the names so the comparison looks only at the payload, then restore them
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Removes duplicate initializers from the model and saves an optimized copy next to it."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
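
# A minimal, self-contained demonstration (my addition) of the core idea above: two
# initializers count as "duplicates" when they compare equal after their names are
# blanked, since the name is the only field expected to differ.
from onnx import TensorProto, helper

t1 = helper.make_tensor("weight_a", TensorProto.FLOAT, [2], [1.0, 2.0])
t2 = helper.make_tensor("weight_b", TensorProto.FLOAT, [2], [1.0, 2.0])
assert _is_equal_tensor_proto(t1, t2)  # same payload, different names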
| 53
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
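
# Quick usage sketch (my addition, assuming torch and vision are installed): outside
# the test harness, the processor is driven through one call that resizes
# (shortest edge -> 20) and then center-crops to 18x18, whatever the input size.
if is_torch_available() and is_vision_available():
    _processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
    _pixel_values = _processor(images=Image.new("RGB", (64, 48)), return_tensors="pt").pixel_values
    assert tuple(_pixel_values.shape) == (1, 3, 18, 18)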
| 234
|
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition data into (less, equal, greater) relative to the pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element of items (0-based), or None for an invalid index."""
    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
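
# Usage sketch (my addition): quick_select finds order statistics in O(n) expected
# time without fully sorting the list, e.g. the median of an odd-length list.
data = [2, 8, 1, 9, 4, 7, 5]  # sorted: [1, 2, 4, 5, 7, 8, 9]
assert quick_select(data, 0) == 1                # minimum
assert quick_select(data, len(data) // 2) == 5   # median
assert quick_select(data, len(data) - 1) == 9    # maximum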
| 53
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
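
# Example invocation (my addition; the paths are placeholders, not files shipped
# with this script):
#
#   python convert_mbart_checkpoint.py /path/to/fairseq/model.pt /path/to/output_dir \
#       --hf_config facebook/mbart-large-cc25 --finetuned
#
# The positional arguments map to `fairseq_path` and `pytorch_dump_folder_path`
# above; add `--mbart_50` for mBART-50 checkpoints.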
| 53
| 0
|
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
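
# Worked example (my addition) of the unbiased pass@k estimator above: with n = 5
# samples of which c = 2 pass, pass@1 = 1 - C(3,1)/C(5,1) = 1 - 3/5 = 2/5.
assert abs(estimate_pass_at_k(np.array([5]), np.array([2]), 1)[0] - 0.4) < 1e-9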
| 151
|
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
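
# Sanity check (my addition): perplexity here is exp(mean token-level cross-entropy),
# so an average loss of 2 nats corresponds to exp(2) ~= 7.39 perplexity.
assert abs(torch.exp(torch.tensor(2.0)).item() - 7.389056) < 1e-4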
| 53
| 0
|
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 299
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
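# Example: the default `attention_types=[[["global", "local"], 12]]` expands to 24 entries
# alternating "global" and "local", which matches the default `num_layers=24`.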
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation that returns the largest divisor of `seq_length` below `window_size` and the resulting number of blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
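# Worked example: for seq_length=8 and window_size=4, the candidates are [1, 2, 3]; the
# divisors of 8 among them are [1, 2], so the block length is 2 and the number of blocks is
# 8 // 2 = 4.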
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
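# Export sketch (hedged -- the constructor follows the generic `OnnxConfigWithPast` API,
# which can differ across transformers versions):
#
#   config = GPTNeoConfig()
#   onnx_config = GPTNeoOnnxConfig(config, task="default")
#   print(onnx_config.inputs)              # dynamic axes for the exported graph
#   print(onnx_config.default_onnx_opset)  # 13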
""" Testing suite for the PyTorch LayoutLMv3 model. """

import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
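# Usage sketch (assumes Pillow and a local document image; the Donut checkpoint named in
# `default_checkpoint` is downloaded on first use):
#
#   from PIL import Image
#
#   doc_qa = DocumentQuestionAnsweringTool()
#   print(doc_qa(document=Image.open("invoice.png"), question="What is the invoice number?"))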
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __A :
def __init__( self , a__ = 6 ):
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : str = None
self.create_linked_list(__A )
def __A ( self , a__ ):
_lowerCAmelCase : List[Any] = Node()
_lowerCAmelCase : int = current_node
_lowerCAmelCase : Tuple = current_node
_lowerCAmelCase : str = current_node
for _ in range(1 , __A ):
_lowerCAmelCase : str = Node()
_lowerCAmelCase : int = current_node
_lowerCAmelCase : Optional[int] = previous_node
_lowerCAmelCase : Union[str, Any] = current_node
_lowerCAmelCase : Optional[Any] = self.front
_lowerCAmelCase : Optional[int] = previous_node
def __A ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __A ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def __A ( self , a__ ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
_lowerCAmelCase : int = self.rear.next
if self.rear:
_lowerCAmelCase : int = data
def __A ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
_lowerCAmelCase : Any = self.front.data
_lowerCAmelCase : List[str] = None
return data
_lowerCAmelCase : str = self.front
_lowerCAmelCase : Union[str, Any] = old_front.next
_lowerCAmelCase : Tuple = old_front.data
_lowerCAmelCase : Dict = None
return data
def __A ( self ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def __A ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class __A :
def __init__( self ):
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : List[Any] = None
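# Usage sketch:
#
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   assert queue.dequeue() == "a"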
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
"""simple docstring"""
def _snake_case ( UpperCamelCase : int , UpperCamelCase : float , UpperCamelCase : float ):
return round(float(moles / volume ) * nfactor )
def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
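# Worked example: moles_to_pressure(volume=0.0034, moles=0.0022, temperature=100) computes
# round((0.0022 * 0.0821 * 100) / 0.0034) = round(5.31...) = 5.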
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Get the writer batch size that defines the maximum row group size in the parquet files."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
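# Note: `batch_size` starts at np.inf and is only lowered when the schema contains Image,
# Audio, or binary features, so a plain text dataset returns None here and the writer falls
# back to its default batch size.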
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
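# Usage sketch (hedged -- writes a small in-memory dataset to a local parquet file):
#
#   ds = Dataset.from_dict({"text": ["foo", "bar"]})
#   ParquetDatasetWriter(ds, "data.parquet").write()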
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
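# Worked example: with clusters [[0, 0, 0], [255, 255, 255]], a black pixel maps to index 0
# and a white pixel to index 1, since argmin picks the nearest centroid in squared euclidean
# distance.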
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        # rescale to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
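# Usage sketch (hedged -- ImageGPT checkpoints on the Hub ship their color clusters in the
# processor config; the checkpoint id is illustrative):
#
#   from PIL import Image
#
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#   input_ids = processor(images=Image.open("cat.jpg"), return_tensors="pt").input_ids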
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
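# Example: catalan_numbers(5) returns [1, 1, 2, 5, 14, 42].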
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
"""Feature extractor class for Speech2Text"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
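# Usage sketch (hedged -- assumes a 1-D float32 numpy array sampled at 16 kHz):
#
#   extractor = Speech2TextFeatureExtractor()
#   batch = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="pt")
#   print(batch.input_features.shape)  # (batch, num_frames, num_mel_bins)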
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE (__lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
__a =GPTaTokenizer
__a =GPTaTokenizerFast
__a =True
__a ={"add_prefix_space": True}
__a =False
def UpperCamelCase__ ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_a = dict(zip(__A , range(len(__A ) ) ) )
_a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_a = {"unk_token": "<unk>"}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def UpperCamelCase__ ( self : Tuple , **__a : Any ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__A )
def UpperCamelCase__ ( self : Union[str, Any] , **__a : List[str] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__A )
def UpperCamelCase__ ( self : Optional[Any] , __a : str ):
_a = "lower newer"
_a = "lower newer"
return input_text, output_text
def UpperCamelCase__ ( self : Dict ):
_a = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = "lower newer"
_a = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_a = tokenizer.tokenize(__A , add_prefix_space=__A )
self.assertListEqual(__A , __A )
_a = tokens + [tokenizer.unk_token]
_a = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def UpperCamelCase__ ( self : List[Any] ):
if not self.test_rust_tokenizer:
return
_a = self.get_tokenizer()
_a = self.get_rust_tokenizer(add_prefix_space=__A )
_a = "lower newer"
# Testing tokenization
_a = tokenizer.tokenize(__A , add_prefix_space=__A )
_a = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
_a = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
_a = rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
_a = self.get_rust_tokenizer(add_prefix_space=__A )
_a = tokenizer.encode(__A , add_prefix_space=__A )
_a = rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
# Testing the unknown token
_a = tokens + [rust_tokenizer.unk_token]
_a = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ) , __A )
def UpperCamelCase__ ( self : Any , *__a : Union[str, Any] , **__a : Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def UpperCamelCase__ ( self : List[str] , __a : Any=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(__A , **__A )
# Simple input
_a = "This is a simple input"
_a = ["This is a simple input 1", "This is a simple input 2"]
_a = ("This is a simple input", "This is a pair")
_a = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding="max_length" )
# Simple input
self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding="max_length" )
# Simple input
self.assertRaises(
__A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding="max_length" , )
# Pair input
self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding="max_length" )
# Pair input
self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding="max_length" )
# Pair input
self.assertRaises(
__A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding="max_length" , )
def UpperCamelCase__ ( self : List[Any] ):
_a = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_a = "This is a simple input"
_a = ["This is a simple input looooooooong", "This is a simple input"]
_a = ("This is a simple input", "This is a pair")
_a = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_a = tokenizer.pad_token_id
_a = tokenizer(__A , padding="max_length" , max_length=30 , return_tensors="np" )
_a = tokenizer(__A , padding=__A , truncate=__A , return_tensors="np" )
_a = tokenizer(*__A , padding="max_length" , max_length=60 , return_tensors="np" )
_a = tokenizer(__A , padding=__A , truncate=__A , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def UpperCamelCase__ ( self : Any ):
_a = "$$$"
_a = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__A , add_bos_token=__A )
_a = "This is a simple input"
_a = ["This is a simple input 1", "This is a simple input 2"]
_a = tokenizer.bos_token_id
_a = tokenizer(__A )
_a = tokenizer(__A )
self.assertEqual(out_s.input_ids[0] , __A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_a = tokenizer.decode(out_s.input_ids )
_a = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def UpperCamelCase__ ( self : int ):
pass
def UpperCamelCase__ ( self : int ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
_a = [self.get_tokenizer(do_lower_case=__A , add_bos_token=__A )]
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_a = "Encode this."
_a = "This one too please."
_a = tokenizer.encode(__A , add_special_tokens=__A )
encoded_sequence += tokenizer.encode(__A , add_special_tokens=__A )
_a = tokenizer.encode_plus(
__A , __A , add_special_tokens=__A , return_special_tokens_mask=__A , )
_a = encoded_sequence_dict["input_ids"]
_a = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__A ) , len(__A ) )
_a = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__A )
]
_a = [x for x in filtered_sequence if x is not None]
self.assertEqual(__A , __A )
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
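# Illustrative sketch (our addition, not part of the original test suite): every check
# above hinges on OPT encodings starting with the </s> token, id 2. A minimal
# standalone probe of that invariant could look like this (model download required;
# the helper name `_check_opt_bos` is our own):
#
#   from transformers import AutoTokenizer
#
#   def _check_opt_bos():
#       tok = AutoTokenizer.from_pretrained("facebook/opt-350m")
#       assert tok("A photo of a cat").input_ids[0] == 2  # encoding starts with </s>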
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[Any] =logging.get_logger(__name__)
a__ : List[Any] ={
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
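# Usage sketch (our addition, not part of the upstream file): composing the joint
# config from the two sub-configs, mirroring the merge logic implemented above.
#
#   text_cfg = AltCLIPTextConfig(hidden_size=1024)
#   vision_cfg = AltCLIPVisionConfig(hidden_size=768)
#   cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert cfg.to_dict()["text_config"]["hidden_size"] == 1024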
'''simple docstring'''
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase__ = get_logger(__name__)
class lowerCAmelCase__ :
lowerCAmelCase : str = "dummy_data"
lowerCAmelCase : List[str] = "datasets"
lowerCAmelCase : Dict = False
def __init__( self : str , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Union[Version, str] , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[List[Callable]] = None , ) ->int:
'''simple docstring'''
_UpperCAmelCase : str = 0
_UpperCAmelCase : Optional[Any] = dataset_name
_UpperCAmelCase : str = cache_dir
_UpperCAmelCase : int = use_local_dummy_data
_UpperCAmelCase : Dict = config
# download_callbacks take a single url as input
_UpperCAmelCase : str = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_UpperCAmelCase : Optional[Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_UpperCAmelCase : Tuple = str(__A )
# to be downloaded
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[Any] = None
@property
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
if self._dummy_file is None:
_UpperCAmelCase : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->Dict:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_UpperCAmelCase : Tuple = cached_path(
__A , cache_dir=self.cache_dir , extract_compressed_file=__A , force_extract=__A )
return os.path.join(__A , self.dummy_file_name )
@property
def lowerCAmelCase__ ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
if self._bucket_url is None:
_UpperCAmelCase : str = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : Optional[int] ) ->Any:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_UpperCAmelCase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_UpperCAmelCase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__A , __A ):
return self.create_dummy_data_dict(__A , __A )
elif isinstance(__A , (list, tuple) ):
return self.create_dummy_data_list(__A , __A )
else:
return self.create_dummy_data_single(__A , __A )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Dict , *lowerCamelCase__ : Any ) ->Optional[int]:
'''simple docstring'''
return self.download_and_extract(__A )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict ) ->Union[str, Any]:
'''simple docstring'''
return self.download_and_extract(__A )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Any , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
return path
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
'''simple docstring'''
return {}
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__A , __A ):
for single_url in single_urls:
download_callback(__A )
else:
_UpperCAmelCase : Dict = single_urls
download_callback(__A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__A , __A ):
_UpperCAmelCase : List[str] = [os.path.join(__A , urllib.parse.quote_plus(Path(__A ).name ) ) for x in single_urls]
else:
_UpperCAmelCase : Tuple = single_urls
_UpperCAmelCase : int = os.path.join(__A , urllib.parse.quote_plus(Path(__A ).name ) )
_UpperCAmelCase : Any = value
# make sure that values are unique
if all(isinstance(__A , __A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_UpperCAmelCase : Optional[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_UpperCAmelCase : Tuple = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __A ) ) for url in data_url )
_UpperCAmelCase : Any = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_UpperCAmelCase : Any = [data_url[0]] * len(__A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_UpperCAmelCase : Optional[Any] = os.path.join(__A , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(__A )
return dummy_data_list
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] ) ->Optional[int]:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_UpperCAmelCase : Any = os.path.join(__A , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(__A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowerCAmelCase__ ( self : str ) ->Any:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Dict ) ->Tuple:
'''simple docstring'''
def _iter_archive_members(lowerCamelCase__ : Tuple ):
# this preserves the order of the members inside the ZIP archive
_UpperCAmelCase : Tuple = Path(self.dummy_file ).parent
_UpperCAmelCase : List[str] = path.relative_to(__A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_UpperCAmelCase : List[Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__A )
_UpperCAmelCase : str = Path(__A )
_UpperCAmelCase : Union[str, Any] = _iter_archive_members(__A ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(__A ).as_posix(), file_path.open("rb" )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
if not isinstance(__A , __A ):
_UpperCAmelCase : Any = [paths]
for path in paths:
if os.path.isfile(__A ):
if os.path.basename(__A ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__A ):
if os.path.basename(__A ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(__A ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(__A , __A )
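# Illustration (our addition): the key-naming rule used by the create_dummy_data_*
# methods above keeps only the last path component of a URL and percent-encodes it
# with urllib.parse.quote_plus, so URLs with query strings map to valid file names.
def _example_dummy_key_naming() -> str:  # hypothetical helper, for illustration only
    url = "https://example.com/data/train.csv?version=2"
    return urllib.parse.quote_plus(Path(url).name)  # -> 'train.csv%3Fversion%3D2'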
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
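# Example invocation (our addition; the script filename and all paths are hypothetical,
# shown for illustration only):
#
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path luke_base/pytorch_model.bin \
#       --metadata_path luke_base/metadata.json \
#       --entity_vocab_path luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted-luke-base \
#       --model_size base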
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
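# Example summarization run (our addition; file paths are hypothetical, and unspecified
# flags such as --num_beams are forwarded to model.generate as noted above):
#
#   python run_eval.py facebook/bart-large-cnn cnn_dm/test.source out/test_gen.txt \
#       --reference_path cnn_dm/test.target --score_path out/rouge.json --bs 16 --fp16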
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = 8
# DPR tok
__UpperCamelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__UpperCamelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(__A , exist_ok=__A )
__UpperCamelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__UpperCamelCase = dict(zip(__A , range(len(__A ) ) ) )
__UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__UpperCamelCase = {'unk_token': '<unk>'}
__UpperCamelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(__A , exist_ok=__A )
__UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def _lowerCamelCase ( self : Tuple ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _lowerCamelCase ( self : Optional[int] ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def _lowerCamelCase ( self : Union[str, Any] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def _lowerCamelCase ( self : str ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _lowerCamelCase ( self : Tuple ):
__UpperCamelCase = self.get_dummy_dataset()
__UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__UpperCamelCase = dataset
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowerCamelCase ( self : Any , __A : bool ):
__UpperCamelCase = self.get_dummy_dataset()
__UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
__UpperCamelCase = os.path.join(self.tmpdirname , 'dataset' )
__UpperCamelCase = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , )
return retriever
def _lowerCamelCase ( self : int ):
__UpperCamelCase = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCamelCase = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
__UpperCamelCase = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
__UpperCamelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(__A , open(__A , 'wb' ) )
__UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
__UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__UpperCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_legacy_index_retriever()
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , __A )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
__UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowerCamelCase ( self : Optional[Any] ):
import torch
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
__UpperCamelCase = [[5, 7], [1_0, 1_1]]
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , __A )
self.assertIsInstance(__A , np.ndarray )
__UpperCamelCase = retriever(
__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='pt' , )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
self.assertIsInstance(__A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__UpperCamelCase = 1
__UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
retriever.set_ctx_encoder_tokenizer(__A )
__UpperCamelCase = [[5, 7], [1_0, 1_1]]
__UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
__UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
self.assertEqual(
len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __A ) # check for doc token related keys in dictionary.
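# Usage sketch (our addition): outside of the mocked setup above, retrieval follows the
# same call shape; question_hidden_states must match the index's embedding size.
#
#   retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=5)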
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline using no model head: returns the hidden states of the base model.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
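# Minimal usage sketch (our addition; the checkpoint name is only an example):
#
#   from transformers import pipeline
#
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test", return_tensors=False)
#   # `features` is a nested python list of token embeddings for the input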
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
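# Consumer-side sketch (our addition): thanks to the _LazyModule indirection above, the
# heavy torch-backed symbols are only imported on first attribute access, e.g.:
#
#   from transformers import TimesformerForVideoClassification  # resolved lazily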
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # when the lexicon size hits a power of two, every key gains one leading bit
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string (only 0's and 1's) as bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Removes the size prefix that a compressed file carries."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it, and writes the result to destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
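# Tiny worked example (our addition): decompress_data grows the lexicon as it reads
# bits, so even a short input exercises the rebuild-on-power-of-two branch above.
def _example_decompress() -> str:  # hypothetical helper, for illustration only
    return decompress_data("101100")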
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """simple docstring"""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.')
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f'OfflineMock[{url}]'),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
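# Usage sketch (added for illustration; the dataset name is a placeholder): simulate a
# network outage around a block of code instead of letting requests hang.
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-16):
#       load_dataset('some/remote-dataset')  # fails fast with a timeout error
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       load_dataset('some/remote-dataset')  # raises requests.ConnectionError at once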
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """simple docstring"""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    """simple docstring"""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    """simple docstring"""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """simple docstring"""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    """simple docstring"""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
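# Usage sketch (added for illustration): mark a test as an expected failure when the
# remote endpoint answers with a transient 500/502 instead of failing the suite.
#   @xfail_if_500_502_http_error
#   def test_push_to_hub(self):
#       ...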
class _RunOutput:
    """simple docstring"""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """simple docstring"""
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:')),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
def pytest_xdist_worker_id():
    """simple docstring"""
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(r'^gw', '', worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    """simple docstring"""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
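# Usage sketch (added for illustration; the script name and launcher flags are
# assumptions, not taken from this file): give each pytest-xdist worker its own
# torch.distributed port.
#   port = get_torch_dist_unique_port()
#   execute_subprocess_async(
#       ['torchrun', f'--master_port={port}', 'train_script.py'], env=os.environ.copy())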
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='cpu')
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_two = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_one, entity_token_two]})
    config.vocab_size += 2
    print(f'Saving tokenizer to {pytorch_dump_folder_path}')
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['@'])[0]].unsqueeze(0)
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(['#'])[0]].unsqueeze(0)
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, enta_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_emb[entity_vocab['[MASK2]']] = entity_emb[entity_vocab['[MASK]']]
    model = LukeModel(config=config).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith('entity_predictions') or key.startswith('lm_head') for key in unexpected_keys)):
        raise ValueError(
            'Unexpected keys'
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}")
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='entity_classification')
    text = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .'
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors='pt')
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}')
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}')
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, 'r', encoding='utf-8') as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split('\t')
            entity_vocab[title] = index
    return entity_vocab
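# Format note (added for illustration): load_entity_vocab expects one tab-separated
# entry per line whose first field is the entity title; everything after the first tab
# is ignored, and each title is mapped to its line index. For example, a file beginning
#   [PAD]<tab>0
#   [UNK]<tab>1
#   [MASK]<tab>2
# yields {'[PAD]': 0, '[UNK]': 1, '[MASK]': 2}.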
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ], )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5, )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5, )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """simple docstring"""
    return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name='attention'):
    """simple docstring"""
    k_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
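# Shape sketch (added for illustration; the concrete sizes are hypothetical): each T5X
# kernel is indexed as [:, i, :, :], i.e. the layer axis comes second. After slicing
# layer i, key/query/value kernels of shape (d_model, num_heads, d_head) are flattened
# to (d_model, num_heads * d_head), e.g. (512, 8, 64) -> (512, 512), while the output
# kernel collapses its first two axes instead; the caller transposes the results.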
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """simple docstring"""
    return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables['target'])
    old = {'/'.join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/encoder/mlp/wi_0/kernel' in old
    print('Split MLP:', split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, 'encoder', 'pre_attention_layer_norm')
        k, o, q, v = tax_attention_lookup(old, i, 'encoder', 'attention')
        new[f'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[f'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, 'encoder', 'pre_mlp_layer_norm')
        wi, wo = tax_mlp_lookup(old, i, 'encoder', split_mlp_wi)
        new[f'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[f'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f'encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
                old, i, 'encoder').T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']
    if not scalable_attention:
        new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
            old, 0, 'encoder').T
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
            old, 0, 'decoder').T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_self_attention_layer_norm')
            k, o, q, v = tax_attention_lookup(old, i, 'decoder', 'self_attention')
            new[f'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_cross_attention_layer_norm')
            k, o, q, v = tax_attention_lookup(old, i, 'decoder', 'encoder_decoder_attention')
            new[f'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_mlp_layer_norm')
            wi, wo = tax_mlp_lookup(old, i, 'decoder', split_mlp_wi)
            new[f'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[f'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f'decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(old, i, 'decoder').T
        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            state_dict['lm_head.weight'] = state_dict['shared.weight']
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """simple docstring"""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False, ):
    """simple docstring"""
    config = MTaConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
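# Example invocation (added for illustration; the script filename and paths are
# placeholders, the flags match the parser below):
#   python convert_umt5_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention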
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[F"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>")
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()
    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device)
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)
    image = load_demo_image()
    prompt = "What is unusual about this image?"
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = InstructBlipProcessor(
        image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer, )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")
    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1, )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(F"Salesforce/{model_name}")
        hf_model.push_to_hub(F"Salesforce/{model_name}")
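# Example invocation (added for illustration; the script filename and path are
# placeholders, the flags match the parser below):
#   python convert_instructblip_checkpoint.py --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path /path/to/output --push_to_hub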
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
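# Usage sketch (added for illustration; the checkpoint name and `model` are assumptions):
#   processor = BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base')
#   inputs = processor(images=image, text='a photography of', return_tensors='pt')
#   captions = processor.batch_decode(model.generate(**inputs), skip_special_tokens=True)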
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """simple docstring"""
    pass


class Node:
    """simple docstring"""

    def __init__(self, data):
        self.data = data
        self.next_node = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self):
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
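# Alternative sketch (added for illustration): Floyd's tortoise-and-hare detects a loop
# in O(1) extra space, instead of the linear `visited` list that `has_loop` keeps.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False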
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
import math
def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
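# Worked example (added for illustration): sorting only the sub-range [start, end).
#   insertion_sort([4, 1, 3], 0, 3)  ->  [1, 3, 4]
# With end=0 the default kicks in (end = len(array)), so insertion_sort([2, 1]) -> [1, 2].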
def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_a(array, first_index, middle_index, last_index):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
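# Strategy note (added for illustration): introsort partitions quicksort-style until a
# range is small enough for insertion sort (<= size_threshold) or the depth budget is
# spent (max_depth == 0), at which point it falls back to heap sort, keeping the worst
# case at O(n log n). For example:
#   sort([103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5])
#   -> [1, 5, 7, 9, 11, 15, 25, 103, 107, 201, 209]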
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """simple docstring"""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
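# Worked example of the rolling hash (added for illustration; the values stay below
# `modulus`, so no reduction kicks in): for a pattern of length 2,
# hash('ab') = ord('a') * 256 + ord('b') and modulus_power = 256. Sliding one position
# right over the text 'abc':
#   hash('bc') = (hash('ab') - ord('a') * 256) * 256 + ord('c')
# which is exactly the update applied to text_hash above.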
def test_rabin_karp() -> None:
    """simple docstring"""
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
    print('Success.')


if __name__ == "__main__":
    test_rabin_karp()
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(['heb-eng'])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card('opus-mt-he-en', dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
'''simple docstring'''
from __future__ import annotations
class Matrix:
    """simple docstring"""

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]))
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns))

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row: int, column: int):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])

    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ])

    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ])
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None):
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other: Matrix):
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __sub__(self, other: Matrix):
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __mul__(self, other: Matrix | int | float):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')

    def __pow__(self, other: int):
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]):
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
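# Usage sketch for the Matrix class above; values chosen so the results are
# easy to verify by hand. Note that scalar multiplication (and hence
# inverse()) coerces entries to int, which can lose precision.
m = Matrix([[1, 2], [3, 4]])
print(m.determinant())    # -2
print(m.adjugate().rows)  # [[4, -2], [-3, 1]]
print((m * m).rows)       # [[7, 10], [15, 22]]
print((m ** 0).rows)      # identity: [[1, 0], [0, 1]]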
| 53
| 0
|
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    # phi[i] starts at i - 1; the sieve below corrects it for every prime
    # factor, leaving phi[i] = Euler's totient of i.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
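# Sanity check (a sketch): Project Euler 72 states there are 21 reduced proper
# fractions for d <= 8, which is exactly sum(phi(d)) for d in 2..8.
from math import gcd

assert solution(8) == 21
assert solution(8) == sum(
    sum(1 for n in range(1, d) if gcd(n, d) == 1) for d in range(2, 9)
)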
| 63
|
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos by content: blank the names, compare, restore.
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        # Remove the duplicate and redirect every node that consumed it.
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicated initializers from an ONNX model and saves an optimized copy next to it.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
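# Minimal sanity check (a sketch) of the content-equality helper above, using
# onnx.helper to build two initializers that differ only by name.
from onnx import TensorProto, helper

t_a = helper.make_tensor('weight_a', TensorProto.FLOAT, [2], [1.0, 2.0])
t_b = helper.make_tensor('weight_b', TensorProto.FLOAT, [2], [1.0, 2.0])
t_c = helper.make_tensor('weight_c', TensorProto.FLOAT, [2], [1.0, 3.0])
assert _is_equal_tensor_proto(t_a, t_b)      # same content, different names
assert not _is_equal_tensor_proto(t_a, t_c)  # different content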
| 53
| 0
|
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 234
|
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Returns the ``index``-th smallest element of ``items`` in expected O(n) time."""
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
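# Usage sketch: pick the median (the len//2-th smallest element, 0-indexed)
# of an unsorted list in expected linear time.
print(quick_select([7, 1, 5, 3, 9], 2))  # 5
print(quick_select([7, 1, 5, 3, 9], 0))  # 1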
| 53
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase__ : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        }, )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory." )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification", )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ] )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
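# A tiny standalone sketch of what collate_fn above produces, using dummy
# tensors in place of real preprocessed images:
examples = [
    {"pixel_values": torch.zeros(3, 224, 224), "labels": 0},
    {"pixel_values": torch.ones(3, 224, 224), "labels": 1},
]
batch = collate_fn(examples)
print(batch["pixel_values"].shape)  # torch.Size([2, 3, 224, 224])
print(batch["labels"])              # tensor([0, 1])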
| 344
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
args = parser.parse_args()
model = convert_fairseq_mbart_checkpoint_from_disk(
    args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
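# A minimal sketch of the embedding-tying trick in make_linear_from_emb above:
# the LM head reuses the decoder's input-embedding matrix, so no new weights
# are created (the dimensions here are illustrative, not mBART's).
emb = nn.Embedding(10, 4)                # vocab_size=10, hidden=4
lm_head = make_linear_from_emb(emb)
hidden = torch.randn(2, 4)
assert lm_head(hidden).shape == (2, 10)  # hidden states -> vocab logits
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # storage is shared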
| 53
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151
|
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that packs tokenized texts into fixed-length chunks."""

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name, split='train', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('inf')
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
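# Perplexity is just exp(mean cross-entropy), as computed in evaluate() above;
# a tiny standalone illustration:
demo_losses = torch.tensor([2.0, 2.2, 1.8])
print(torch.exp(demo_losses.mean()))  # tensor(7.3891)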
| 53
| 0
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
if root is None:
return 0
# Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data
if count_nodes(__lowercase ) != count_coins(__lowercase ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)
return get_distrib(__lowercase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
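# Usage sketch: the tree below has its 3 coins piled at the root, so one coin
# must move to each child -- 2 moves in total.
root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(root) == 2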
| 299
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
a__ : Any =logging.get_logger(__name__)
a__ : Optional[Any] ={
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'but is `len(config.attention_layers) = {len(self.attention_layers)}`, '
                f'`config.num_layers = {self.num_layers}`. '
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.' )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """ONNX-exportable helper: find the largest divisor of seq_length below window_size."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
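# custom_unfold above mirrors torch.Tensor.unfold so the op can be traced for
# ONNX export; a quick illustration of what unfold produces:
import torch

x = torch.arange(10)
print(x.unfold(dimension=0, size=4, step=2))
# tensor([[0, 1, 2, 3],
#         [2, 3, 4, 5],
#         [4, 5, 6, 7],
#         [6, 7, 8, 9]])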
| 53
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device)
    original_model.eval()
    print('Done!')
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            key = key.replace('opt', 'language')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1E-2)
    print('Looks ok!')
    print('Generating a caption...')
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)
    original_outputs = original_model.generate({'image': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print('Original generation:', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
lowerCAmelCase: Any = argparse.ArgumentParser()
lowerCAmelCase: Optional[int] = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
lowerCAmelCase: Tuple = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
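# The create_rename_keys/rename_key machinery above is just an ordered list of
# (old, new) pairs applied to a state dict; a toy illustration:
toy_state = {'visual_encoder.cls_token': 1, 'ln_vision.weight': 2}
toy_pairs = [
    ('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding'),
    ('ln_vision.weight', 'vision_model.post_layernorm.weight'),
]
for old, new in toy_pairs:
    rename_key(toy_state, old, new)
print(sorted(toy_state))
# ['vision_model.embeddings.class_embedding', 'vision_model.post_layernorm.weight']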
| 297
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='pt').input_ids
        pixel_values = self.pre_processor(document, return_tensors='pt').pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs['pixel_values'].to(self.device), decoder_input_ids=inputs['decoder_input_ids'].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '')
        sequence = re.sub(r'<.*?>', '', sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
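# A quick illustration of the decode() post-processing above: the first
# (task-start) token is stripped with a non-greedy regex before token2json.
demo = '<s_docvqa><s_question>total?</s_question><s_answer> $12</s_answer>'
print(re.sub(r'<.*?>', '', demo, count=1).strip())
# '<s_question>total?</s_question><s_answer> $12</s_answer>'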
| 53
| 0
|
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    # Extended Euclidean algorithm: maintains u3 = u1*a + u2*m (and likewise
    # for v); when v3 hits 0, u1 is the inverse of a modulo m.
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
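# Quick check: 7 * 15 = 105 = 4 * 26 + 1, so 15 is the inverse of 7 mod 26.
assert find_mod_inverse(7, 26) == 15
# And a non-coprime pair has no inverse:
try:
    find_mod_inverse(4, 26)
except ValueError as err:
    print(err)  # mod inverse of 4 and 26 does not exist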
| 44
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 53
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
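# The _LazyModule pattern above defers the heavy submodule imports until an
# attribute is first accessed. A minimal module-level sketch of the same idea
# using PEP 562's module __getattr__ (structure illustrative, not the real
# transformers implementation):
import importlib

_lazy_structure = {"math": ["sqrt"], "json": ["dumps"]}


def __getattr__(name):
    for module_name, exported in _lazy_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")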
| 109
|
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Get the writer batch size that defines the maximum row group size in the parquet files.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , __A : NestedDataStructureLike[PathLike] , __A : Optional[NamedSplit] = None , __A : Optional[Features] = None , __A : str = None , __A : bool = False , __A : bool = False , __A : Optional[int] = None , **__A : Dict , ):
super().__init__(
__A , split=__A , features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , )
__UpperCamelCase = path_or_paths if isinstance(__A , __A ) else {self.split: path_or_paths}
__UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1]
__UpperCamelCase = Parquet(
cache_dir=__A , data_files=__A , features=__A , hash=__A , **__A , )
def _lowerCamelCase ( self : Optional[int] ):
# Build iterable dataset
if self.streaming:
__UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , )
__UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=__A , in_memory=self.keep_in_memory )
return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle. The caller is responsible for opening and closing the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format"):
            batch = query_table(table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None)
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
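# Usage sketch: a minimal write/read round-trip through the two classes above.
# Illustrative only -- "data.parquet" is a made-up path, and in the real library
# this wiring is normally exposed via Dataset.to_parquet / Dataset.from_parquet.
if __name__ == "__main__":
    ds = Dataset.from_dict({"id": [0, 1, 2], "text": ["a", "b", "c"]})
    # batch_size bounds the parquet row group size; when omitted it falls back
    # to get_writer_batch_size(ds.features)
    num_bytes = ParquetDatasetWriter(ds, "data.parquet", batch_size=2).write()
    reloaded = ParquetDatasetReader("data.parquet", split="train").read()
    assert reloaded.num_rows == 3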
| 53
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
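# Usage sketch (illustrative): the values below follow the __init__ defaults
# above; BeitOnnxConfig is assumed to take the model config like any OnnxConfig.
if __name__ == "__main__":
    beit_config = BeitConfig(image_size=384, use_auxiliary_head=False)
    assert beit_config.patch_size == 16  # library default
    onnx_config = BeitOnnxConfig(beit_config)
    print(dict(onnx_config.inputs))         # {"pixel_values": {0: "batch", ...}}
    print(onnx_config.atol_for_validation)  # 1e-4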
| 12
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # integration test for backward compatibility: `dataset_name` must survive `asdict`
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
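def test_split_dict_yaml_round_trip_example():
    # A concrete instance of the round-trip exercised above (illustrative sketch;
    # the exact YAML key layout is an assumption -- a list of per-split dicts such
    # as {"name": "train", "num_bytes": 1337, "num_examples": 42}).
    splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
    reloaded = SplitDict._from_yaml_list(splits._to_yaml_list())
    assert reloaded["train"].num_examples == 42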
| 53
| 0
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 182
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
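# Illustration (not the real transformers implementation): a stripped-down
# version of the lazy-module idea used above. Attribute access resolves the
# owning submodule on first use and caches the result; error handling and
# __dir__ support are omitted.
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value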
| 53
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''yjernite/retribert-base-uncased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
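# Usage sketch (illustrative; downloading the checkpoint requires network
# access -- the model id comes from PRETRAINED_VOCAB_FILES_MAP above):
if __name__ == "__main__":
    tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
    enc = tok("what is dense retrieval?", "retrieval with a bi-encoder")
    # 0s cover [CLS] + question + [SEP]; 1s cover the second segment + [SEP],
    # exactly as create_token_type_ids_from_sequences computes
    print(enc["token_type_ids"])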
| 62
|
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True, normalize_vars: Optional[bool] = True, padding_value: float = 0.0) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
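# Usage sketch with synthetic audio (illustrative; the class name is the one
# reconstructed above, and torch/torchaudio must be installed). The sampling
# rate passed in must match the extractor's, otherwise __call__ raises.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    speech = rng.standard_normal(16_000).astype(np.float32)  # 1 s of fake mono audio
    extractor = Speech2TextFeatureExtractor()
    batch = extractor(speech, sampling_rate=16_000, padding=True, return_tensors="np")
    print(batch["input_features"].shape)  # (1, num_frames, 80) log-mel features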
| 53
| 0
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
def UpperCamelCase__ ( self : Union[str, Any] ):
# Initialize image_processor
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
_a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a = image_processor(
__A , return_tensors="pt" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__ ( self : int ):
# Initialize image_processor
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
_a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
_a = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__A ):
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__A ).flattened_patches
_a = "Hello"
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__A , header_text=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a = image_processor(
__A , return_tensors="pt" , max_patches=__A , header_text=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__ ( self : Tuple ):
# Initialize image_processor
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
_a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a = image_processor(
__A , return_tensors="pt" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__ ( self : Optional[int] ):
# Initialize image_processor
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
_a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a = image_processor(
__A , return_tensors="pt" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def UpperCamelCase__ ( self : Union[str, Any] ):
# Initialize image_processor
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
_a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_a = image_processor(
__A , return_tensors="pt" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
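# The expected_hidden_dim arithmetic used throughout these tests: a flattened
# patch stores patch_height * patch_width * channels pixel values plus 2 extra
# slots for the patch's (row, col) position. Illustrative helper (not part of
# the library API):
def expected_flattened_patch_dim(patch_h: int = 16, patch_w: int = 16, channels: int = 3) -> int:
    return patch_h * patch_w * channels + 2  # 770 with the defaults above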
| 63
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, project_dim=768, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
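# Usage sketch (illustrative): composing the composite config from sub-configs
# and round-tripping it through to_dict().
if __name__ == "__main__":
    text_config = AltCLIPTextConfig(num_hidden_layers=6)
    vision_config = AltCLIPVisionConfig(num_hidden_layers=6)
    altclip_config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
    # to_dict() re-nests the sub-configs, so the composite stays serializable
    assert altclip_config.to_dict()["text_config"]["num_hidden_layers"] == 6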
| 53
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : List[Any] = self.get_dummy_components()
_UpperCAmelCase : Any = StableDiffusionInstructPixaPixPipeline(**__A )
_UpperCAmelCase : List[str] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
_UpperCAmelCase : List[str] = self.get_dummy_inputs(__A )
_UpperCAmelCase : Dict = sd_pipe(**__A ).images
_UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Optional[Any] = self.get_dummy_components()
_UpperCAmelCase : int = StableDiffusionInstructPixaPixPipeline(**__A )
_UpperCAmelCase : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
_UpperCAmelCase : Any = self.get_dummy_inputs(__A )
_UpperCAmelCase : str = "french fries"
_UpperCAmelCase : List[Any] = sd_pipe(**__A , negative_prompt=__A )
_UpperCAmelCase : List[Any] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase : List[str] = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Optional[Any] = self.get_dummy_components()
_UpperCAmelCase : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
_UpperCAmelCase : Optional[int] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
_UpperCAmelCase : List[Any] = self.get_dummy_inputs(__A )
_UpperCAmelCase : List[str] = [inputs["prompt"]] * 2
_UpperCAmelCase : Optional[int] = np.array(inputs["image"] ).astype(np.floataa ) / 2_5_5.0
_UpperCAmelCase : Tuple = torch.from_numpy(__A ).unsqueeze(0 ).to(__A )
_UpperCAmelCase : List[Any] = image / 2 + 0.5
_UpperCAmelCase : Optional[int] = image.permute(0 , 3 , 1 , 2 )
_UpperCAmelCase : List[str] = image.repeat(2 , 1 , 1 , 1 )
_UpperCAmelCase : int = sd_pipe(**__A ).images
_UpperCAmelCase : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Any = self.get_dummy_components()
_UpperCAmelCase : Optional[int] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
_UpperCAmelCase : int = StableDiffusionInstructPixaPixPipeline(**__A )
_UpperCAmelCase : int = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
_UpperCAmelCase : List[str] = self.get_dummy_inputs(__A )
_UpperCAmelCase : List[Any] = sd_pipe(**__A ).images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [round(__A , 4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(__A ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase : int = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self : Any ) ->int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : int = self.get_dummy_components()
_UpperCAmelCase : List[str] = StableDiffusionInstructPixaPixPipeline(**__A )
_UpperCAmelCase : int = VaeImageProcessor(do_resize=__A , do_normalize=__A )
_UpperCAmelCase : str = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_UpperCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs_by_type(__A , input_image_type="pt" ) )[0]
_UpperCAmelCase : Any = components["vae"]
_UpperCAmelCase : List[Any] = self.get_dummy_inputs_by_type(__A , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_UpperCAmelCase : List[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
_UpperCAmelCase : int = pipe(**__A )[0]
_UpperCAmelCase : int = np.abs(out - out_latents_inputs ).max()
self.assertLess(__A , 1E-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_UpperCAmelCase : Optional[int] = self.get_inputs()
_UpperCAmelCase : Tuple = pipe(**__A ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : int = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
_UpperCAmelCase : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_UpperCAmelCase : int = self.get_inputs()
_UpperCAmelCase : List[str] = pipe(**__A ).images
_UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Tuple = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase__ ( self : int ) ->int:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
_UpperCAmelCase : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_UpperCAmelCase : str = self.get_inputs()
_UpperCAmelCase : str = pipe(**__A ).images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : List[Any] = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = 0
def callback_fn(lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : torch.FloatTensor ) -> None:
_UpperCAmelCase : List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCAmelCase : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_UpperCAmelCase : str = latents[0, -3:, -3:, -1]
_UpperCAmelCase : Any = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_UpperCAmelCase : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_UpperCAmelCase : List[str] = latents[0, -3:, -3:, -1]
_UpperCAmelCase : Any = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
_UpperCAmelCase : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_UpperCAmelCase : str = self.get_inputs()
pipe(**__A , callback=__A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase__ ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
_UpperCAmelCase : Tuple = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase : List[str] = self.get_inputs()
_UpperCAmelCase : int = pipe(**__A )
_UpperCAmelCase : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase__ ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_UpperCAmelCase : Optional[int] = inputs["image"].resize((5_04, 5_04) )
_UpperCAmelCase : List[Any] = "timbrooks/instruct-pix2pix"
_UpperCAmelCase : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_UpperCAmelCase : List[Any] = pipe(**__A )
_UpperCAmelCase : str = output.images[0]
_UpperCAmelCase : Any = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
_UpperCAmelCase : int = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
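# Sketch of the callback contract exercised above: diffusers pipelines invoke
# callback(step, timestep, latents) every `callback_steps` denoising steps.
# The function below is illustrative only; actually running the commented line
# would download the "timbrooks/instruct-pix2pix" weights used in these tests.
def log_latents(step: int, timestep: int, latents: torch.FloatTensor) -> None:
    # for this 512x512 pipeline the latents are shaped (1, 4, 64, 64)
    print(f"step={step} timestep={timestep} latents_shape={tuple(latents.shape)}")
# pipe(**inputs, callback=log_latents, callback_steps=1)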
| 234
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
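# Layout of the entity_vocab.tsv consumed by load_entity_vocab above: one entity
# per line, tab separated, title in the first column; the assigned id is the
# line number (the second column is ignored). Illustrative sketch with made-up
# entries and file name:
#
#   with open("entity_vocab.tsv", "w", encoding="utf-8") as f:
#       f.write("[PAD]\t0\n[UNK]\t0\n[MASK]\t0\n[MASK2]\t0\n")
#   vocab = load_entity_vocab("entity_vocab.tsv")
#   assert vocab["[MASK]"] == 2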
| 53
| 0
|
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # raises ValueError: no path from G to Foo
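# Efficiency note: list.pop(0) above is O(n) per dequeue; collections.deque
# gives O(1) pops. A drop-in functional variant of the traversal (illustrative):
from collections import deque


def breadth_first_parents(graph: dict[str, list[str]], source: str) -> dict[str, str | None]:
    parent: dict[str, str | None] = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for neighbor in graph[vertex]:
            if neighbor not in parent:
                parent[neighbor] = vertex
                queue.append(neighbor)
    return parent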
| 344
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
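

# Why the retrieve() tests above expect doc_ids == [[1], [0]]: the Flat FAISS
# index maximises inner product, and for the dummy data the scores can be
# checked by hand with plain numpy.
if __name__ == "__main__":
    docs = np.stack([np.ones(8), 2 * np.ones(8)])  # dummy passages "0" and "1"
    queries = np.stack([np.ones(8), -np.ones(8)])  # the two hidden states used above
    scores = queries @ docs.T
    print(scores)                 # [[ 8. 16.] [-8. -16.]]
    print(scores.argmax(axis=1))  # [1 0] -> doc "1" for query 0, doc "0" for query 1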
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
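
# Example of the key renaming performed above (verifiable by calling
# rename_state_dict_key directly; each PATTERNS pair is applied in order):
#
#   >>> rename_state_dict_key("decoder/memory_attention/output_proj/kernel")
#   'decoder.encoder_attn.out_proj.weight'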
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
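
# An illustrative (not the actual transformers) lazy-module mechanism: names
# resolve to real imports only on first attribute access. This sketch assumes
# an import_structure dict shaped like the one above.
import importlib


class LazyModuleSketch:
    def __init__(self, name: str, import_structure: dict):
        self._name = name
        # invert {submodule: [symbol, ...]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        submodule = self._symbol_to_module.get(symbol)
        if submodule is None:
            raise AttributeError(f"module {self._name!r} has no attribute {symbol!r}")
        module = importlib.import_module(f".{submodule}", self._name)
        return getattr(module, symbol)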
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
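
# Programmatic use (hypothetical paths shown; the checkpoint file and the
# output directory below are placeholders, not real artifacts):
#
#   convert_longformer_qa_checkpoint_to_pytorch(
#       "longformer-base-4096",
#       "./checkpoints/longformer_qa.ckpt",
#       "./longformer-base-4096-finetuned-qa",
#   )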
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
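

# A minimal sketch of how the offline() helper above is used in a test; the URL
# is illustrative and never actually reached, because Session.send is patched.
def test_network_calls_fail_offline():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.Session().get("https://huggingface.co")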
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase: Dict = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow C++ logging if TF is importable
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"""{max_subarray_sum(nums) = }""")
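
    # Kadane's algorithm above runs in O(n) time and O(1) extra space. Quick
    # checks, including the allow_empty_subarrays switch: when enabled, the
    # running sum may reset to the empty subarray (sum 0), so an all-negative
    # input yields 0 rather than its largest single element.
    assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # subarray [4, -1, 2, 1]
    assert max_subarray_sum([-3, -1, -2]) == -1
    assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0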
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from a T5X-Flax checkpoint to Transformers-PyTorch format."""
    # Target keys below follow the standard Hugging Face T5/UMT5 state-dict naming.
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, model_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(model_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(model_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, model_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names), 1)
        self.assertEqual(len(model_args), 1)

        # Should have only "input_ids"
        self.assertEqual(model_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
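# Usage sketch (hedged; the checkpoint name is one commonly paired with this processor):
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=image, text="a photo of", return_tensors="pt")  # image: any PIL image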
# ---------------------------------------------------------------------------
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"""Total count for various states are: {counts}""")
# ---------------------------------------------------------------------------
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """simple docstring"""

    pass


class Node:
    """simple docstring"""

    def __init__(self, data: Any):
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
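# Alternative O(1)-memory loop check (a minimal sketch, not part of the original API):
# Floyd's tortoise-and-hare avoids the O(n) `visited` list used by Node.__iter__.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False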
# ---------------------------------------------------------------------------
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        # Sieve of Eratosthenes: strike out the multiples of p, stepping by p
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    primes = sorted(primes)  # sorted order is what makes the early `break`s below valid
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            # the smallest fourth power is 2**4 == 16, so nothing larger can fit
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
# ---------------------------------------------------------------------------
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """simple docstring"""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """simple docstring"""
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
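# Usage sketch:
#   rabin_karp("abc", "xxabcxx")  ->  True
#   rabin_karp("abd", "xxabcxx")  ->  False
# The explicit text[i : i + p_len] == pattern re-check guards against hash collisions.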
# ---------------------------------------------------------------------------
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
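# Worked example (approximate): equated_monthly_installments(25_000, 0.08, 3)
# uses a monthly rate of 0.08 / 12 over 36 payments and returns roughly 783.4.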
# ---------------------------------------------------------------------------
'''simple docstring'''
from __future__ import annotations
class snake_case :
"""simple docstring"""
def __init__( self : Optional[int] , __A : list[list[int]] ):
__UpperCamelCase = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(__A ) != 0:
__UpperCamelCase = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__A ) != cols:
raise error
for value in row:
if not isinstance(__A , (int, float) ):
raise error
__UpperCamelCase = rows
else:
__UpperCamelCase = []
def _lowerCamelCase ( self : int ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def _lowerCamelCase ( self : str ):
return len(self.rows )
@property
def _lowerCamelCase ( self : Any ):
return len(self.rows[0] )
@property
def _lowerCamelCase ( self : Optional[Any] ):
return (self.num_rows, self.num_columns)
@property
def _lowerCamelCase ( self : Dict ):
return self.order[0] == self.order[1]
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__A )
def _lowerCamelCase ( self : Any ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def _lowerCamelCase ( self : List[str] ):
return bool(self.determinant() )
def _lowerCamelCase ( self : Dict , __A : int , __A : int ):
__UpperCamelCase = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__A ).determinant()
def _lowerCamelCase ( self : Dict , __A : int , __A : int ):
if (row + column) % 2 == 0:
return self.get_minor(__A , __A )
return -1 * self.get_minor(__A , __A )
def _lowerCamelCase ( self : List[str] ):
return Matrix(
[
[self.get_minor(__A , __A ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def _lowerCamelCase ( self : Union[str, Any] ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__A )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[Any] ):
return str(self.rows )
def __str__( self : Union[str, Any] ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(__A ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def _lowerCamelCase ( self : List[Any] , __A : list[int] , __A : int | None = None ):
__UpperCamelCase = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(__A , __A ):
raise type_error
for value in row:
if not isinstance(__A , (int, float) ):
raise type_error
if len(__A ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(__A )
else:
__UpperCamelCase = self.rows[0:position] + [row] + self.rows[position:]
def _lowerCamelCase ( self : Optional[Any] , __A : list[int] , __A : int | None = None ):
__UpperCamelCase = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(__A , __A ):
raise type_error
for value in column:
if not isinstance(__A , (int, float) ):
raise type_error
if len(__A ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
__UpperCamelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
__UpperCamelCase = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Tuple , __A : object ):
if not isinstance(__A , __A ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , __A : object ):
return not self == other
def __neg__( self : List[Any] ):
return self * -1
def __add__( self : List[str] , __A : Matrix ):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : str , __A : Matrix ):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : str , __A : Matrix | int | float ):
if isinstance(__A , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__A , __A ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(__A , __A ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self : Union[str, Any] , __A : int ):
if not isinstance(__A , __A ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
__UpperCamelCase = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def _lowerCamelCase ( cls : Tuple , __A : list[int] , __A : list[int] ):
return sum(row[i] * column[i] for i in range(len(__A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
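# Intended usage sketch for the Matrix class above:
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()    # -2
#   m.is_invertable()  # True
#   m.order            # (2, 2)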
# ---------------------------------------------------------------------------
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """simple docstring"""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-0_8)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
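# Usage sketch (hedged): W_query and W_supports are tokenized batches produced by the
# companion tokenizer utilities of the fsner package, not plain strings.
# model = FSNERModel()
# p_starts, p_ends = model(W_query, W_supports)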
# ---------------------------------------------------------------------------
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
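# Usage sketch (the model path is hypothetical):
# optimized_path = remove_dup_initializers("path/to/model.onnx")
# print("deduplicated model written to", optimized_path)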
# ---------------------------------------------------------------------------
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase : Optional[int] = "dpt"
def __init__( self : int , lowerCamelCase__ : Optional[int]=7_68 , lowerCamelCase__ : Any=12 , lowerCamelCase__ : Tuple=12 , lowerCamelCase__ : Optional[Any]=30_72 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Dict=1E-12 , lowerCamelCase__ : List[Any]=3_84 , lowerCamelCase__ : List[str]=16 , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : str=True , lowerCamelCase__ : Union[str, Any]=[2, 5, 8, 11] , lowerCamelCase__ : Any="project" , lowerCamelCase__ : List[str]=[4, 2, 1, 0.5] , lowerCamelCase__ : Union[str, Any]=[96, 1_92, 3_84, 7_68] , lowerCamelCase__ : List[str]=2_56 , lowerCamelCase__ : str=-1 , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Union[str, Any]=0.4 , lowerCamelCase__ : int=2_55 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[Any]=[1, 10_24, 24, 24] , lowerCamelCase__ : List[str]=[0, 1] , lowerCamelCase__ : Optional[Any]=None , **lowerCamelCase__ : Tuple , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**__A )
_UpperCAmelCase : Dict = hidden_size
_UpperCAmelCase : Optional[Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone." )
_UpperCAmelCase : Any = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
_UpperCAmelCase : str = BitConfig(**__A )
elif isinstance(__A , __A ):
logger.info("Initializing the config with a `BiT` backbone." )
_UpperCAmelCase : List[str] = BitConfig(**__A )
elif isinstance(__A , __A ):
_UpperCAmelCase : Dict = backbone_config
else:
raise ValueError(
F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
_UpperCAmelCase : Tuple = backbone_featmap_shape
_UpperCAmelCase : Optional[Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be \'project\' when using `DPT-hybrid` mode." )
else:
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : Optional[int] = num_attention_heads
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : int = layer_norm_eps
_UpperCAmelCase : Dict = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : List[Any] = qkv_bias
_UpperCAmelCase : Union[str, Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of [\'ignore\', \'add\', \'project\']" )
_UpperCAmelCase : List[Any] = readout_type
_UpperCAmelCase : Optional[Any] = reassemble_factors
_UpperCAmelCase : int = neck_hidden_sizes
_UpperCAmelCase : Tuple = fusion_hidden_size
_UpperCAmelCase : Union[str, Any] = head_in_index
_UpperCAmelCase : Tuple = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase : Union[str, Any] = use_auxiliary_head
_UpperCAmelCase : Optional[int] = auxiliary_loss_weight
_UpperCAmelCase : Tuple = semantic_loss_ignore_index
_UpperCAmelCase : Optional[int] = semantic_classifier_dropout
def lowerCAmelCase__ ( self : List[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_UpperCAmelCase : Tuple = self.backbone_config.to_dict()
_UpperCAmelCase : str = self.__class__.model_type
return output
# ---------------------------------------------------------------------------
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """simple docstring"""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
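# Usage sketch: select order statistics without fully sorting.
#   quick_select([7, 1, 5, 3, 9], 0)  ->  1  (minimum)
#   quick_select([7, 1, 5, 3, 9], 2)  ->  5  (median)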
# ---------------------------------------------------------------------------
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'{solution() = }')
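# Sanity check: the first two almost-equilateral Heronian triangles are (5, 5, 6)
# and (17, 17, 16), so solution(50) == 16 + 50 == 66.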
# ---------------------------------------------------------------------------
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
# ---------------------------------------------------------------------------
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
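# Expected behaviour (sketch): decimal_isolate keeps only the fractional part, optionally
# rounded, e.g. decimal_isolate(35.345, 1) -> 0.3 and decimal_isolate(-14.789, 3) -> -0.789
# (up to floating point error).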
# ---------------------------------------------------------------------------
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """simple docstring"""

    def __init__(self, tokenizer, dataset, seq_length=1_024, num_of_sequences=1_024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    """simple docstring"""
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """simple docstring"""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
# ---------------------------------------------------------------------------
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
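# Expected output (sketch): for the adjacency list above the articulation points are
# vertices 2, 3 and 5, so the call should print 2, 3 and 5.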
# ---------------------------------------------------------------------------
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
a__ : Any =logging.get_logger(__name__)
a__ : Optional[Any] ={
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=50_257, max_position_embeddings=2_048, hidden_size=2_048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """simple docstring"""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
# ---------------------------------------------------------------------------
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class a__( __lowerCamelCase ):
lowercase__ = "EncodecFeatureExtractor"
lowercase__ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[Any] ):
super().__init__(__A , __A )
a : Optional[int] = self.feature_extractor
a : Any = False
def lowercase_ ( self : Dict , __snake_case : Dict=None , __snake_case : Dict=None , __snake_case : Union[str, Any]=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Union[str, Any] , *__snake_case : List[Any] , **__snake_case : List[Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
a : str = kwargs.pop('audio' , __A )
a : Union[str, Any] = kwargs.pop('sampling_rate' , __A )
a : Union[str, Any] = kwargs.pop('text' , __A )
if len(__A ) > 0:
a : Dict = args[0]
a : Dict = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if text is not None:
a : Optional[int] = self.tokenizer(__A , **__A )
if audio is not None:
a : Optional[Any] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
a : Tuple = audio_inputs['input_values']
if "padding_mask" in audio_inputs:
a : Tuple = audio_inputs['padding_mask']
return inputs
def lowercase_ ( self : Optional[Any] , *__snake_case : str , **__snake_case : Dict ):
a : List[str] = kwargs.pop('audio' , __A )
a : int = kwargs.pop('padding_mask' , __A )
if len(__A ) > 0:
a : str = args[0]
a : int = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowercase_ ( self : str , *__snake_case : List[str] , **__snake_case : List[Any] ):
return self.tokenizer.decode(*__A , **__A )
def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Optional = None ):
a : str = to_numpy(__A )
a , a , a : int = audio_values.shape
if padding_mask is None:
return list(__A )
a : int = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
a : str = seq_len - padding_mask.shape[-1]
a : List[str] = 1 - self.feature_extractor.padding_value
a : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , 'constant' , constant_values=__A )
a : List[Any] = audio_values.tolist()
for i in range(__A ):
a : Optional[int] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
a : Optional[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
# ---------------------------------------------------------------------------
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
# ---------------------------------------------------------------------------
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
# ---------------------------------------------------------------------------
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
# ---------------------------------------------------------------------------
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A: List[Any] = logging.get_logger(__name__)
A: Tuple = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1E-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [F"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
# ---------------------------------------------------------------------------
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """simple docstring"""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    """simple docstring"""

    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter:
    """simple docstring"""

    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format", ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
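# Usage sketch (hedged; mirrors how datasets' Dataset.to_parquet / from_parquet use these classes):
# from datasets import Dataset
# ds = Dataset.from_dict({"text": ["a", "b"]})
# ParquetDatasetWriter(ds, "data.parquet").write()
# reloaded = ParquetDatasetReader("data.parquet", split="train").read()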
# ---------------------------------------------------------------------------
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# ---------------------------------------------------------------------------
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> list:
    '''simple docstring'''
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
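# Sanity check: the sequence starts 1, 1, 2, 5, 14, 42, so
# catalan_numbers(5) == [1, 1, 2, 5, 14, 42].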
# ---------------------------------------------------------------------------
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
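# Example invocation (hypothetical file name and argument values, shown only to
# illustrate how the HfArgumentParser flags above are consumed):
#   python initialize_model.py --tokenizer_name codeparrot/codeparrot \
#       --config_name gpt2-large --model_name my-codeparrot-init --push_to_hub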
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
a__ : Tuple = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    train_file: Optional[str] = field(default=None , metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"})
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__( self ):
        if self.train_file is not None:
            extension = self.train_file.split("." )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split("." )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """simple docstring"""
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self , features ):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["input_ids"] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels , dtype=torch.int64 )
        return batch
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split("." )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"""ending{i}""" for i in range(4 )]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`." )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="max_length" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
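    # Illustrative shape walk-through (assumed N examples): tokenization sees
    # 4*N flattened (context, ending) pairs, and the comprehension above regroups
    # them into chunks of 4, so each tokenized feature ends up [N, 4, seq_len].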
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
    # Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    '''simple docstring'''
    # For xla_spawn (TPUs); the index argument is required by the spawner.
    main()
if __name__ == "__main__":
main()
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds , labels ):
    '''simple docstring'''
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
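# Illustrative use of the helpers above (assumed toy arrays):
#   simple_accuracy on preds [0, 1, 1] vs labels [0, 1, 0] -> ~0.667
#   acc_and_f1 additionally reports the binary F1 of the positive class.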
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    """simple docstring"""
    def _info( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute( self , predictions , references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
                "\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
"""simple docstring"""
from PIL import Image
def mean_threshold(image ):
    '''simple docstring'''
    height , width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width ):
        for j in range(height ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width ):
        for i in range(height ):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')
def get_hash(example ):
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def line_stats(example ):
    '''simple docstring'''
    line_lengths = [len(line ) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example ):
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example["content"]] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques ):
    '''simple docstring'''
    if example["hash"] in uniques:
        uniques.remove(example["hash"] )
        return True
    else:
        return False
def is_autogenerated(example , scan_width=5 ):
    '''simple docstring'''
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05 ):
    '''simple docstring'''
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n" )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("config" )
        count_test += line.lower().count("test" )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example ):
    '''simple docstring'''
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example , minimum=4 ):
    '''simple docstring'''
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=" )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example ):
    '''simple docstring'''
    input_ids = tokenizer(example["content"] , truncation=False )["input_ids"]
    ratio = len(example["content"] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess(example ):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter(example , uniques , args ):
    '''simple docstring'''
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path ):
    '''simple docstring'''
    with open(file_path , "rb" ) as f_in:
        with gzip.open(str(file_path ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
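# Usage sketch (assumed path): compress_file("data/file-000000000001.json")
# writes data/file-000000000001.json.gz and deletes the uncompressed original.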
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter , duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(F"Size of deduplicated dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}")
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    """simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
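# Worked example for the iterative branch above (strings from the docstring):
# prediction "there is an other sample" vs reference "there is another one"
# aligns with 2 substitutions + 1 insertion over 4 reference words, so the
# pair contributes 3 errors to `incorrect` and 4 words to `total`.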
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config , input_ids , attention_mask=None , head_mask=None ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    """simple docstring"""
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , embed_dim=1_6 , word_embed_proj_dim=1_6 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=self.is_encoder_decoder , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config , input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFOPTModel(config=config )
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """simple docstring"""
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp( self ):
        self.model_tester = TFOPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OPTConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_resize_token_embeddings( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model , embedding_layer ):
            if hasattr(embedding_layer , "weight" ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer , "weight" ):
                    return embedding_layer.weight
                else:
                    return None
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
                # build the embeddings
                model = model_class(config=config )
                old_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                old_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(size )
                new_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                new_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , assert_size )
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                        models_equal = False
                self.assertTrue(models_equal )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , assert_size )
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                            models_equal = False
                    self.assertTrue(models_equal )
def _long_tensor(tok_lst ):
    '''simple docstring'''
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    """simple docstring"""
    vocab_size = 99
    def _get_config_and_data( self ):
        eos_column_vector = tf.ones((4, 1) , dtype=tf.int32 ) * 2
        input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_inference_no_head( self ):
        model = TFOPTModel.from_pretrained("facebook/opt-350m" )
        input_ids = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = tf.not_equal(input_ids , model.config.pad_token_id )
        with tf.GradientTape():
            output = model(input_ids=input_ids , attention_mask=attention_mask ).last_hidden_state
        expected_shape = (1, 1_1, 5_1_2)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4E-3 ) )
        xla_generate = tf.function(model , jit_compile=True )
        output = xla_generate(input_ids , attention_mask )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4E-2 ) )
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    """simple docstring"""
    def setUp( self ):
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits( self ):
        model = TFOPTForCausalLM.from_pretrained(self.path_model )
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model )
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts , return_tensors="tf" , padding=True , add_special_tokens=False )
        logits = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        logits_meta = tf.constant(
            [
                [1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
                [-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
                [0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
                [6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
            ] )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1E-4 ) )
        xla_generate = tf.function(model , jit_compile=True )
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1E-4 ) )
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    """simple docstring"""
    @property
    def prompts( self ):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def test_generation_pre_attn_layer_norm( self ):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="tf" ).input_ids
            generated_ids = model.generate(input_ids , max_length=1_0 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
    def test_batch_generation( self ):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences , return_tensors="tf" , padding=True )
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["attention_mask"] )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1] , tf.int64 ) )
        inputs_padded = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm( self ):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="tf" ).input_ids
            generated_ids = model.generate(input_ids , max_length=1_0 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time , burst_time , no_of_processes ):
    '''simple docstring'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
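# Worked example (assumed input): arrival_time=[0, 1, 2], burst_time=[3, 1, 2]
# gives preemptive-SJF waiting times [1, 0, 2]: P2 preempts P1 at t=1, P1
# resumes at t=2 and finishes at t=4, and P3 runs last, finishing at t=6.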
def calculate_turnaroundtime(burst_time , no_of_processes , waiting_time ):
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time , turn_around_time , no_of_processes ):
    '''simple docstring'''
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
a__ : Optional[Any] = int(input())
a__ : Optional[int] = [0] * no_of_processes
a__ : int = [0] * no_of_processes
a__ : List[Any] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
a__ , a__ : Tuple = map(int, input().split())
a__ : int = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a__ : Dict = burst_time
a__ : Any = no_of_processes
a__ : Optional[int] = waiting_time
a__ : Union[str, Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a__ : str = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    """simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=6_4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : int ) -> Dict:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
snake_case__ : Tuple = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Tuple = False
snake_case__ : List[str] = True
snake_case__ : Dict = False
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = SqueezeBertModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , dim=3_7 )
def UpperCAmelCase_ ( self : List[Any] ) -> str:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Any ) -> Any:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Any ) -> int:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = SqueezeBertModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
__SCREAMING_SNAKE_CASE = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 3) )
self.assertEqual(output.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-4 ) )
| 54
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy: " + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
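
# --- Illustrative usage sketch (added; not part of the original file) ---
# `pad` is shaped so it can double as a DataLoader `collate_fn`: it accepts a
# list of per-example dicts and returns one padded batch. `Wav2Vec2FeatureExtractor`
# is an assumption here, chosen only because it is a concrete subclass that can be
# constructed offline; any subclass with `model_input_names` behaves the same way.
if __name__ == "__main__":
    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    batch = [{"input_values": [0.1, 0.2, 0.3]}, {"input_values": [0.4, 0.5]}]
    padded = extractor.pad(batch, padding="longest", return_attention_mask=True, return_tensors="np")
    print(padded["input_values"].shape)  # (2, 3): the shorter example is right-padded with 0.0
    print(padded["attention_mask"])      # [[1 1 1], [1 1 0]]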
| 54
| 1
|
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
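
# --- Illustrative demo (added; not part of the original module) ---
# A plain-English sentence should place common letters such as E/T/A near the
# front of its frequency order and score close to the maximum of 12; shuffled
# ciphertext usually scores much lower, which is the signal used to rank
# decryption candidates in classical frequency analysis.
def _demo_frequency_order() -> None:
    sample = "The quick brown fox jumps over the lazy dog"
    print(get_frequency_order(sample))       # 26-letter string, most frequent first
    print(english_freq_match_score(sample))  # integer between 0 and 12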
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
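
# --- Illustrative usage sketch (added; not part of the original module) ---
# `glue_compute_metrics` dispatches on the task name: accuracy tasks take aligned
# integer arrays, while "sts-b" takes continuous scores. A minimal run for "sst-2"
# (requires scikit-learn, since every entry point calls `requires_backends(..., "sklearn")`):
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("sst-2", preds, labels)  # {"acc": 0.75}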
| 54
| 1
|
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
a__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
a__ , a__ : Optional[Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
a__ : List[str] = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
a__ : Union[str, Any] = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
a__ : Any = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 54
|
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
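
# --- Worked example (added for illustration) ---
# One step of the update rule above, written out: with weight w, the prediction
# is p = sigmoid(0.02 * w) and the target is t = expected / 100, so the weight
# moves by 0.02 * (t - p) * p * (1 - p), i.e. plain gradient descent on a single
# sigmoid neuron. For instance, w = 10 gives p = sigmoid(0.2) ~ 0.550; for
# expected = 100 the error is t - p ~ 0.450 and the step is
# 0.02 * 0.450 * 0.550 * 0.450 ~ 0.0022.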
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 54
| 1
|
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
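
# --- Illustrative usage sketch (added; not part of the original file) ---
# Typical pattern: create the adapter once per module and let `main_process_only`
# (default True) silence duplicate log lines under multi-process launches.
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()  # initializes PartialState
#     logger = get_logger(__name__)
#     logger.info("printed once, on the main process only")
#     logger.info("printed by every rank, in launch order", main_process_only=False, in_order=True)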
| 54
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a__ : Tuple = False
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 54
| 1
|
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 54
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
a__ : Union[str, Any] = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 54
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
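
# --- Illustrative usage sketch (added; not part of the original file) ---
# Configs are plain containers, so overriding a few fields is enough to define a
# smaller variant; any unknown kwargs are handled by `PretrainedConfig`.
#
#     config = NezhaConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
#     config.max_relative_position  # 64: Nezha encodes token distances relatively, up to this span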
| 54
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
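
# --- Note (added for illustration) ---
# The protocol exercised above: each dc.update(token_id) advances the constraint
# and returns (stepped, completed, reset). `stepped` means the token extended one
# of the allowed sequences, `completed` means a full disjunct was just matched,
# and `reset` means the token broke the partial match and progress was discarded.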
| 54
| 1
|
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number: int) -> list:
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
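
# --- Illustrative demo (added; not part of the original module) ---
def _demo_primelib() -> None:
    print(is_prime(97))              # True
    print(prime_factorization(360))  # [2, 2, 2, 3, 3, 5]
    print(gcd(48, 36))               # 12
    print(kg_v(8, 10))               # 40 (least common multiple)
    print(goldbach(28))              # [5, 23]: two primes summing to 28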
| 54
|
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int = None, end: int = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
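
# --- Illustrative note (added): why this is "slow" ---
# Slowsort is a deliberately pessimal "multiply and surrender" algorithm: its
# recurrence T(n) = 2 T(n/2) + T(n - 1) + O(1) grows super-polynomially, yet the
# sort is correct and in-place:
#
#     data = [5, 2, 4, 1, 3]
#     slowsort(data)
#     data  # [1, 2, 3, 4, 5]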
if __name__ == "__main__":
from doctest import testmod
testmod()
| 54
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
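
# --- Hedged note (added): how the lazy import works ---
# At import time the module body only records the available tokenizer classes in
# `_import_structure`; `_LazyModule` then replaces this module in `sys.modules`
# and resolves e.g. `NllbTokenizer` on first attribute access, so sentencepiece
# and tokenizers are imported only if they are actually used.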
| 54
|
"""simple docstring"""
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
| 1
|
"""simple docstring"""
from math import ceil, sqrt
def solution(limit: int = 100_0000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1

        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(F"{solution() = }")
| 54
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any = CLIPTokenizer
snake_case__ : Dict = CLIPTokenizerFast
snake_case__ : List[Any] = True
snake_case__ : Optional[Any] = {}
snake_case__ : Dict = False
def UpperCAmelCase_ ( self : Any ) -> Any:
super().setUp()
# fmt: off
__SCREAMING_SNAKE_CASE = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
__SCREAMING_SNAKE_CASE = {"unk_token": "<unk>"}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase__ ) )
def UpperCAmelCase_ ( self : List[Any] , **UpperCAmelCase__ : Tuple ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Any , **UpperCAmelCase__ : Optional[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = "lower newer"
__SCREAMING_SNAKE_CASE = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self : int ) -> List[str]:
__SCREAMING_SNAKE_CASE = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__SCREAMING_SNAKE_CASE = "lower newer"
__SCREAMING_SNAKE_CASE = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
__SCREAMING_SNAKE_CASE = [1_0, 2, 1_6, 9, 3, 2, 1_6, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085",  # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))
@require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
# CLIP always lower cases letters
pass
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law. Exactly one of the four arguments must be passed as 0;
    the function solves for that quantity and returns it in a dict.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
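# Worked example (hedged: relies on the signature fixed above). Two 1 C charges
# held 1 m apart experience a force of COULOMBS_CONSTANT newtons:
# >>> coulombs_law(force=0, charge1=1, charge2=1, distance=1)
# {'force': 8988000000.0}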
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
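# Note: _split_gen_kwargs only shards the list-valued kwargs, so a 4-shard list
# split over max_num_jobs=2 yields two jobs with two shards each, exactly as the
# parametrization above encodes.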
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
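# Caveat: the CSS class above is tied to Yahoo Finance's markup at the time of
# writing and may change without notice, so treat this scraper as illustrative.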
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
"""simple docstring"""
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})
        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})
        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
"""simple docstring"""
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self : Union[str, Any] ) -> str:
return str(self )
    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
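        # Sherman-Morrison: if this matrix holds A^(-1), then
        # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u),
        # which costs only a few matrix-vector products instead of a fresh inversion.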
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1) to be tested with Sherman-Morrison
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, do_normalize=True,
                 image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
# Core arguments
    config_args = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [f"""pip install {args.accelerate_version}"""]
new_cmd += args.command
    args.command = "; ".join(new_cmd)
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
        print(f"Running {' '.join(cmd)}")
return
    subprocess.run(cmd)
print("Successfully setup pod." )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
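# Hedged usage note: this module is normally reached through the accelerate CLI,
# e.g. `accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a --command "pip list"`.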
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=5_1_2,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string):
        if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
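# A typical user-agent string produced above looks like (values illustrative):
#   "diffusers/0.x.y; python/3.10.12; session_id/<hex>; torch/2.0.1"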
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`.")
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
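# For example, a resolved cache path such as
#   ".../snapshots/<40-hex-commit-hash>/config.json"
# yields that hash, while paths without a "snapshots/<hash>/" component yield None.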
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.")
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
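# e.g. _add_variant("diffusion_pytorch_model.bin", "fp16")
#      -> "diffusion_pytorch_model.fp16.bin"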
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download,
    local_files_only, use_auth_token, user_agent, revision, commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
return model_file
except: # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
"this model name. Check the model page at "
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count tile budgets t <= t_limit that produce between 1 and n_limit laminae."""
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
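# Hedged note: count[t] tallies how many laminae use exactly t tiles (the hole
# lower bound keeps t <= t_limit), so the sum counts tile budgets achievable in
# between 1 and n_limit distinct ways, as Project Euler 174 asks.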
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
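# Known Project Euler 31 result (hedged: relies on the renamed helpers above):
# >>> solution(200)
# 73682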
if __name__ == "__main__":
print(solution(int(input().strip())))
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
"""simple docstring"""
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
                 vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = 2_0
__SCREAMING_SNAKE_CASE = model_class_name(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.encode(inputs_dict["input_ids"] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__SCREAMING_SNAKE_CASE = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
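# Note (added, illustrative): both helpers above exercise the same pattern -- decode step by
# step with `past_key_values` from `init_cache`, then compare the final-step logits against a
# single full forward pass; the cache implementation is considered correct if they agree to 1e-3.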
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        # The decoder mask always attends to the first (decoder start) token, then masks padding.
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Tuple = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
snake_case__ : Union[str, Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
snake_case__ : Tuple = True
snake_case__ : Union[str, Any] = False
snake_case__ : int = False
snake_case__ : List[Any] = False
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = FlaxPegasusModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
@jax.jit
def encode_jitted(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=None , **UpperCAmelCase__ : int ):
return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__SCREAMING_SNAKE_CASE = encode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = encode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
__SCREAMING_SNAKE_CASE = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ):
return model.decode(
decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , )
with self.subTest("JIT Enabled" ):
__SCREAMING_SNAKE_CASE = decode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = decode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("google/pegasus-large" , from_pt=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = np.ones((1, 1) )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
__SCREAMING_SNAKE_CASE = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
__SCREAMING_SNAKE_CASE = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
__SCREAMING_SNAKE_CASE = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__SCREAMING_SNAKE_CASE = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
__SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ , return_tensors="np" , truncation=UpperCAmelCase__ , max_length=5_1_2 , padding=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.generate(**UpperCAmelCase__ , num_beams=2 ).sequences
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
assert tgt_text == decoded
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a__ : Any = None
a__ : List[str] = logging.get_logger(__name__)
a__ : int = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : Tuple = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a__ : int = {
'''t5-small''': 5_1_2,
'''t5-base''': 5_1_2,
'''t5-large''': 5_1_2,
'''t5-3b''': 5_1_2,
'''t5-11b''': 5_1_2,
}
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : List[Any] = VOCAB_FILES_NAMES
snake_case__ : Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : List[str] = ["input_ids", "attention_mask"]
snake_case__ : Tuple = TaTokenizer
snake_case__ : List[int] = []
def __init__( self : Tuple , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : str="</s>" , UpperCAmelCase__ : str="<unk>" , UpperCAmelCase__ : str="<pad>" , UpperCAmelCase__ : int=1_0_0 , UpperCAmelCase__ : Optional[int]=None , **UpperCAmelCase__ : Tuple , ) -> Optional[int]:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__SCREAMING_SNAKE_CASE = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
__SCREAMING_SNAKE_CASE = len(set(filter(lambda UpperCAmelCase__ : bool("extra_id_" in str(UpperCAmelCase__ ) ) , UpperCAmelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , extra_ids=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
__SCREAMING_SNAKE_CASE = extra_ids
@staticmethod
def UpperCAmelCase_ ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__SCREAMING_SNAKE_CASE = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , UpperCAmelCase__ , )
return max_model_length
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
logger.info(F"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def UpperCAmelCase_ ( self : List[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
    token_ids_a = token_ids_a + [self.eos_token_id]
    if token_ids_b is None:
        return self.prefix_tokens + token_ids_a
    else:
        token_ids_b = token_ids_b + [self.eos_token_id]
        return self.prefix_tokens + token_ids_a + token_ids_b
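    # Added note (illustrative): since `prefix_tokens` is empty for T5, a single sequence is
    # serialized as `X </s>` and a pair of sequences as `A </s> B </s>`.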
def UpperCAmelCase_ ( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
    eos = [self.eos_token_id]
    if token_ids_b is None:
        return len(token_ids_a + eos ) * [0]
    return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
return list(
set(filter(lambda UpperCAmelCase__ : bool(re.search(R"<extra_id_\d+>" , UpperCAmelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
return [self.convert_tokens_to_ids(UpperCAmelCase__ ) for token in self.get_sentinel_tokens()]
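    # Added note (illustrative): with the default extra_ids=100, get_sentinel_tokens() returns
    # the sentinel tokens "<extra_id_0>" through "<extra_id_99>" (order not guaranteed), and the
    # method above maps them to their vocabulary ids.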
"""simple docstring"""
def solution(limit = 100_0000 ):
    '''simple docstring'''
    # Sieve of Eratosthenes over the odd numbers, seeded with 2.
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the primes p dividing n.
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
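# Worked check (added, illustrative): Project Euler 72 states there are 21 reduced proper
# fractions for denominators d <= 8, and sum(phi(d) for d in 2..8) = 1+2+2+4+2+6+4 = 21,
# so solution(8) should return 21 (up to float rounding in the product formula).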
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
a__ : Union[str, Any] = None
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Optional[int] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : Optional[int] = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
a__ : List[str] = {
'''google/fnet-base''': 5_1_2,
'''google/fnet-large''': 5_1_2,
}
a__ : Union[str, Any] = '''▁'''
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : List[Any] = VOCAB_FILES_NAMES
snake_case__ : int = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : List[str] = ["input_ids", "token_type_ids"]
snake_case__ : Tuple = FNetTokenizer
def __init__( self : str , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[Any]="<unk>" , UpperCAmelCase__ : Dict="[SEP]" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="[CLS]" , UpperCAmelCase__ : List[Any]="[MASK]" , **UpperCAmelCase__ : Union[str, Any] , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__SCREAMING_SNAKE_CASE = (
AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ , normalized=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
else mask_token
)
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = remove_space
__SCREAMING_SNAKE_CASE = keep_accents
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
def UpperCAmelCase_ ( self : List[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return cls + token_ids_a + sep
    return cls + token_ids_a + sep + token_ids_b + sep
def UpperCAmelCase_ ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep ) * [0]
    return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
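    # Added note (illustrative): for a pair (A, B) the mask built above is 0 for the
    # `[CLS] A [SEP]` segment and 1 for the `B [SEP]` segment.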
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
return (out_vocab_file,)
"""simple docstring"""
def catalan_numbers(upper_limit ):
    '''simple docstring'''
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
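# Worked example (added, illustrative): catalan_numbers(5) returns [1, 1, 2, 5, 14, 42],
# i.e. the Catalan numbers C(0) through C(5).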
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
a__ : List[str] = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a__ : int = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k ):
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
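# Illustrative example (added, not part of the original script): applying PATTERNS in order
# maps a TF-style key to its PyTorch counterpart, e.g.
#   rename_state_dict_key("memory_attention/output_proj/kernel") -> "encoder_attn.out_proj.weight"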
def convert_pegasus(tf_weights , cfg_updates ):
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
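# Added note (illustrative): TF stores dense/projection kernels as (in_features, out_features)
# while torch.nn.Linear expects (out_features, in_features), which is why "dense"/"proj"
# weights are transposed above before being copied into the state dict.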
def UpperCAmelCase__ (lowerCAmelCase_="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tf.train.list_variables(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = ["Adafactor", "global_step"]
for name, shape in tqdm(lowerCAmelCase_ , desc="converting tf checkpoint to dict" ):
__SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name )
if skip_key:
continue
__SCREAMING_SNAKE_CASE = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path , save_dir ):
    '''simple docstring'''
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight" )
    sd.pop("model.encoder.embed_positions.weight" )
    torch.save(sd , Path(save_dir ) / "pytorch_model.bin" )
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
a__ : List[str] = parser.parse_args()
if args.save_dir is None:
a__ : int = Path(args.tf_ckpt_path).parent.name
a__ : Tuple = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
a__ : Tuple = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
snake_case__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
snake_case__ : bool = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
snake_case__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
snake_case__ : bool = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
snake_case__ : Optional[str] = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
snake_case__ : bool = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
snake_case__ : Optional[int] = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
snake_case__ : Optional[int] = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ : bool = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
snake_case__ : Optional[int] = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
snake_case__ : Optional[int] = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
if self.train_file is not None:
__SCREAMING_SNAKE_CASE = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__SCREAMING_SNAKE_CASE = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
snake_case__ : PreTrainedTokenizerBase
snake_case__ : Union[bool, str, PaddingStrategy] = True
snake_case__ : Optional[int] = None
snake_case__ : Optional[int] = None
def __call__( self : int , UpperCAmelCase__ : Any ) -> str:
__SCREAMING_SNAKE_CASE = "label" if "label" in features[0].keys() else "labels"
__SCREAMING_SNAKE_CASE = [feature.pop(UpperCAmelCase__ ) for feature in features]
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = len(features[0]["input_ids"] )
__SCREAMING_SNAKE_CASE = [
[{k: v[i] for k, v in feature.items()} for i in range(UpperCAmelCase__ )] for feature in features
]
__SCREAMING_SNAKE_CASE = list(chain(*UpperCAmelCase__ ) )
__SCREAMING_SNAKE_CASE = self.tokenizer.pad(
UpperCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
__SCREAMING_SNAKE_CASE = {k: v.view(UpperCAmelCase__ , UpperCAmelCase__ , -1 ) for k, v in batch.items()}
# Add back labels
__SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ , dtype=torch.intaa )
return batch
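# Shape walk-through (added, illustrative): with a batch of B examples and 4 endings each,
# the B * 4 flattened sequences are padded together and reshaped back, so batch["input_ids"]
# ends up with shape (B, 4, max_seq_len) while "labels" is a 1-D tensor of length B.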
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase_ )
datasets.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__SCREAMING_SNAKE_CASE = {}
if data_args.train_file is not None:
__SCREAMING_SNAKE_CASE = data_args.train_file
if data_args.validation_file is not None:
__SCREAMING_SNAKE_CASE = data_args.validation_file
__SCREAMING_SNAKE_CASE = data_args.train_file.split("." )[-1]
__SCREAMING_SNAKE_CASE = load_dataset(
lowerCAmelCase_ , data_files=lowerCAmelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__SCREAMING_SNAKE_CASE = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__SCREAMING_SNAKE_CASE = [f"""ending{i}""" for i in range(4 )]
__SCREAMING_SNAKE_CASE = "sent1"
__SCREAMING_SNAKE_CASE = "sent2"
if data_args.max_seq_length is None:
__SCREAMING_SNAKE_CASE = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
__SCREAMING_SNAKE_CASE = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(examples ):
    first_sentences = [[context] * 4 for context in examples[context_name]]
    question_headers = examples[question_header_name]
    second_sentences = [
        [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
    ]
    # Flatten out
    first_sentences = list(chain(*first_sentences ) )
    second_sentences = list(chain(*second_sentences ) )
    # Tokenize
    tokenized_examples = tokenizer(
        first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="max_length" if data_args.pad_to_max_length else False , )
    # Un-flatten
    return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
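    # Added note (illustrative): each example contributes 4 (context, ending) pairs, so the
    # tokenizer sees 4 * batch_size sequences; the final dict comprehension regroups every 4
    # consecutive rows back into one multiple-choice example.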
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
__SCREAMING_SNAKE_CASE = raw_datasets["train"]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = min(len(lowerCAmelCase_ ) , data_args.max_train_samples )
__SCREAMING_SNAKE_CASE = train_dataset.select(range(lowerCAmelCase_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
__SCREAMING_SNAKE_CASE = train_dataset.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
__SCREAMING_SNAKE_CASE = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = min(len(lowerCAmelCase_ ) , data_args.max_eval_samples )
__SCREAMING_SNAKE_CASE = eval_dataset.select(range(lowerCAmelCase_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
__SCREAMING_SNAKE_CASE = eval_dataset.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__SCREAMING_SNAKE_CASE = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCAmelCase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(eval_predictions ):
    predictions, label_ids = eval_predictions
    preds = np.argmax(predictions , axis=1 )
    return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , compute_metrics=lowerCAmelCase_ , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
__SCREAMING_SNAKE_CASE = train_result.metrics
__SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ )
)
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
trainer.log_metrics("train" , lowerCAmelCase_ )
trainer.save_metrics("train" , lowerCAmelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__SCREAMING_SNAKE_CASE = trainer.evaluate()
__SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
trainer.log_metrics("eval" , lowerCAmelCase_ )
trainer.save_metrics("eval" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[int]=None , **UpperCAmelCase__ : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = config_class
__SCREAMING_SNAKE_CASE = has_text_modality
__SCREAMING_SNAKE_CASE = kwargs
__SCREAMING_SNAKE_CASE = common_properties
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
__SCREAMING_SNAKE_CASE = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) , msg=F"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(UpperCAmelCase__ ):
try:
setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=F"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(UpperCAmelCase__ ):
try:
__SCREAMING_SNAKE_CASE = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=F"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
__SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Dict ) -> int:
__SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , "config.json" )
config_first.to_json_file(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.config_class.from_json_file(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.config_class.from_pretrained(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
__SCREAMING_SNAKE_CASE = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
config_first.save_pretrained(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.config_class.from_pretrained(UpperCAmelCase__ , subfolder=UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
__SCREAMING_SNAKE_CASE = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
if self.config_class.is_composition:
return
__SCREAMING_SNAKE_CASE = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str ) -> List[str]:
__SCREAMING_SNAKE_CASE = copy.deepcopy(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.config_class(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(UpperCAmelCase__ , UpperCAmelCase__ ) != value:
wrong_values.append((key, getattr(UpperCAmelCase__ , UpperCAmelCase__ ), value) )
if len(UpperCAmelCase__ ) > 0:
__SCREAMING_SNAKE_CASE = "\n".join([F"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(F"""The following keys were not properly set in the config:\n{errors}""" )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
"""simple docstring"""
from PIL import Image
def mean_threshold(image ):
    '''simple docstring'''
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width ):
        for j in range(height ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width ):
        for i in range(height ):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
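# Added note (illustrative): the first double loop computes the global mean intensity of the
# grayscale image; the second binarizes each pixel against that mean (255 above, 0 at/below).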
if __name__ == "__main__":
a__ : List[str] = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : int = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from jiwer import compute_measures
import datasets
a__ : Optional[int] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
a__ : List[str] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
a__ : Dict = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCamelCase_ ( datasets.Metric):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=False ) -> Optional[int]:
if concatenate_texts:
return compute_measures(UpperCAmelCase__ , UpperCAmelCase__ )["wer"]
else:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
for prediction, reference in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = compute_measures(UpperCAmelCase__ , UpperCAmelCase__ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any = CLIPTokenizer
snake_case__ : Dict = CLIPTokenizerFast
snake_case__ : List[Any] = True
snake_case__ : Optional[Any] = {}
snake_case__ : Dict = False
def UpperCAmelCase_ ( self : Any ) -> Any:
super().setUp()
# fmt: off
__SCREAMING_SNAKE_CASE = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
__SCREAMING_SNAKE_CASE = {"unk_token": "<unk>"}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase__ ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
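    # How the expected tokens above come about (walkthrough added for clarity,
    # derived by hand from the tiny merge list in setUp): "lower" starts as
    # l o w e r</w>; the merge "l o" gives lo w e r</w>, and "e r</w>" gives
    # lo w er</w>. "newer" has no applicable "n e" merge, so only "e r</w>"
    # fires, yielding n e w er</w>.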
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
__SCREAMING_SNAKE_CASE = "xa\u0303y" + " " + "x\xe3y"
__SCREAMING_SNAKE_CASE = tokenizer_s.tokenize(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer_r.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"""{text_of_1_token} {text_of_1_token}"""

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = F""" {text}"""
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
@require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    '''simple docstring'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    '''simple docstring'''
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
a__ : Optional[Any] = int(input())
a__ : Optional[int] = [0] * no_of_processes
a__ : int = [0] * no_of_processes
a__ : List[Any] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
a__ , a__ : Tuple = map(int, input().split())
a__ : int = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a__ : Dict = burst_time
a__ : Any = no_of_processes
a__ : Optional[int] = waiting_time
a__ : Union[str, Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a__ : str = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
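# A minimal non-interactive sketch of the functions above (preemptive SJF,
# a.k.a. shortest remaining time first). The expected values were worked out
# by hand, not taken from the source:
#
# wt = calculate_waitingtime([0, 1, 2], [4, 2, 1], 3)   # -> [3, 0, 1]
# tat = calculate_turnaroundtime([4, 2, 1], 3, wt)      # -> [7, 2, 2]
# calculate_average_times(wt, tat, 3)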
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func, ya, xa, step_size, x_end):
    '''simple docstring'''
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
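# A hand-checked usage sketch (not from the source): integrate dy/dx = y from
# x = 0 with y(0) = 1. With step_size 0.01 the method returns roughly
# (1.01)**100 ≈ 2.7048 at x = 1, converging to e as the step shrinks.
#
# y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
# y[-1]  # ~2.7048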
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """simple docstring"""
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
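# A minimal usage sketch of ``pad`` above (shapes worked out by hand; the
# concrete construction and numbers are assumptions, not from the source):
# for a 1-D extractor with padding_value=0.0, two sequences of lengths 3 and 5
# are right-padded to length 5 and an attention mask marks the real frames.
#
# fe = SequenceFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
# batch = fe.pad(
#     {"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6, 0.7, 0.8]]},
#     padding=True,
#     return_tensors="np",
# )
# batch["input_values"].shape   # (2, 5)
# batch["attention_mask"][0]    # array([1, 1, 1, 0, 0], dtype=int32)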
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    '''simple docstring'''
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}""")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
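# A hedged usage sketch of the functions above (the toy numbers were worked
# out by hand, not taken from the source): with preds = [1, 0, 1] and
# labels = [1, 1, 1], accuracy is 2/3, precision 1.0, recall 2/3, so F1 = 0.8.
#
# import numpy as np
# glue_compute_metrics("mrpc", np.array([1, 0, 1]), np.array([1, 1, 1]))
# # -> {"acc": 0.666..., "f1": 0.8, "acc_and_f1": 0.733...}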
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def UpperCAmelCase__ (lowerCAmelCase_ = "" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(lowerCAmelCase_ ).text , "html.parser" )
__SCREAMING_SNAKE_CASE = soup.find_all("td" , attrs="titleColumn" )
__SCREAMING_SNAKE_CASE = soup.find_all("td" , class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowerCAmelCase_ , lowerCAmelCase_ )
}
def UpperCAmelCase__ (lowerCAmelCase_ = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_imdb_top_aaa_movies()
with open(lowerCAmelCase_ , "w" , newline="" ) as out_file:
__SCREAMING_SNAKE_CASE = csv.writer(lowerCAmelCase_ )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
"""simple docstring"""
import math
import random
def sigmoid_function(value, deriv=False):
    '''simple docstring'''
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
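# Clarifying note (added, not in the original): with deriv=True the function
# expects the *already activated* value s = sigmoid(x) and returns s * (1 - s),
# which equals the derivative of the sigmoid evaluated at x. That is exactly
# how forward_propagation below uses it, passing layer_1 back in.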
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected, number_propagations):
    '''simple docstring'''
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # Reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    """simple docstring"""

    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    '''simple docstring'''
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    """simple docstring"""

    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )
    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)
return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    '''simple docstring'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    '''simple docstring'''
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
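# Clarifying note (standard Euler-path theory, not spelled out in the source):
# a connected undirected graph has an Euler circuit iff every vertex has even
# degree, and an Euler path iff exactly two vertices have odd degree. The
# return codes above encode circuit (1), path (2) and neither (3); for code 2
# the traversal must start at an odd-degree vertex, which is why odd_node is
# returned alongside.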
def check_euler(graph, max_node):
    '''simple docstring'''
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    '''simple docstring'''
    G1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    G2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    G3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    G4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    G5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(G1, max_node)
    check_euler(G2, max_node)
    check_euler(G3, max_node)
    check_euler(G4, max_node)
    check_euler(G5, max_node)
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    """simple docstring"""

    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i
class DummyDataCollator:
    """simple docstring"""

    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    """simple docstring"""

    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    """simple docstring"""

    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    """simple docstring"""

    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"""{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"""
                )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence, start=None, end=None):
    '''simple docstring'''
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
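# A minimal in-place usage sketch (slowsort is the classic multiply-and-
# surrender algorithm: sort the two halves recursively, move the maximum to
# the end, then re-sort everything but the last element):
#
# seq = [5, 2, 4, 1, 3]
# slowsort(seq)
# seq  # -> [1, 2, 3, 4, 5]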
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    """simple docstring"""

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    '''simple docstring'''
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"""
    )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__SCREAMING_SNAKE_CASE = image_processor.size["shortest_edge"]
else:
__SCREAMING_SNAKE_CASE = (image_processor.size["height"], image_processor.size["width"])
__SCREAMING_SNAKE_CASE = Compose(
[
Lambda(lambda lowerCAmelCase_ : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowerCAmelCase_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = [transforms(lowerCAmelCase_ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics("eval" , lowerCAmelCase_ )
trainer.save_metrics("eval" , lowerCAmelCase_ )
# Write model card and (optionally) push to hub
__SCREAMING_SNAKE_CASE = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
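# Illustrative launch command for this script (the dataset name, output path,
# and hyperparameter values below are placeholders for demonstration, not
# defaults baked into the code):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train \
#       --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss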
"""simple docstring"""
def twos_complement(number: int) -> str:
    """
    Take a non-positive integer `number` and return its two's complement
    representation as a binary string.

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        "1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
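# Illustrative invocation of the command defined above (the TPU name and zone
# are placeholder values; --debug prints the assembled gcloud command instead
# of running it):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -U accelerate" --debug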
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """
    Distinct ways to climb a staircase of `number_of_steps` steps,
    taking 1 or 2 steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
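# Quick sanity check (illustrative, not part of the original module): the count
# of distinct 1-or-2-step climbs follows the Fibonacci sequence.
#
#   [climb_stairs(n) for n in range(1, 7)]  # -> [1, 2, 3, 5, 8, 13]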
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search on a slice of the list. Returns -1 if the target is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted list.

    >>> ite_ternary_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 3)
    -1
    >>> ite_ternary_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 13)
    4
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on a sorted list.

    >>> rec_ternary_search(0, 8, [0, 1, 2, 8, 13, 17, 19, 32, 42], 3)
    -1
    >>> rec_ternary_search(0, 8, [0, 1, 2, 8, 13, 17, 19, 32, 42], 13)
    4
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at position: {result1}")
        print(f"Recursive search: {target} found at position: {result2}")
    else:
        print("Not found")
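# Non-interactive usage sketch (the array and target below are illustrative):
#
#   data = [1, 3, 5, 7, 9, 11]
#   ite_ternary_search(data, 7)                    # -> 3
#   rec_ternary_search(0, len(data) - 1, data, 7)  # -> 3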
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate each conversation turn, appending the EOS token after every turn."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
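# Minimal usage sketch (downloads the checkpoint referenced in the map above,
# so it assumes network access to the Hugging Face Hub):
#
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   tokenizer("Hello world")["input_ids"]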
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
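# Usage sketch: with the lazy module installed above, attributes resolve on
# first access; e.g. (the torch-backed class below assumes torch is installed):
#
#   from transformers.models.roformer import RoFormerModel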
"""simple docstring"""
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
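# Typical invocations (the `make style` hook mentioned in the error message is
# an assumption about how the repository wires this script up):
#
#   python utils/sort_auto_mappings.py               # sort the mappings in place
#   python utils/sort_auto_mappings.py --check_only  # fail if any file needs sorting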