"""simple docstring"""
import pytest
lowercase_ = "__dummy_dataset1__"
lowercase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
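

# A minimal usage sketch (hypothetical test, not part of the original fixtures):
# `tmp_path` is pytest's built-in temporary directory, so this only verifies that
# the fixture wrote the loading script where it claims to.
def test_dataset_loading_script_dir_contains_script(dataset_loading_script_dir, dataset_loading_script_name):
    import os

    script_path = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
    assert os.path.isfile(script_path)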
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=False ) -> Any:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = len(set_a.intersection(lowerCAmelCase__ ) )
if alternative_union:
__a = len(lowerCAmelCase__ ) + len(lowerCAmelCase__ )
else:
__a = len(set_a.union(lowerCAmelCase__ ) )
return intersection / union
if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(lowerCAmelCase__ , (list, tuple) ):
__a = [element for element in set_a if element in set_b]
if alternative_union:
__a = len(lowerCAmelCase__ ) + len(lowerCAmelCase__ )
return len(lowerCAmelCase__ ) / union
else:
__a = set_a + [element for element in set_b if element not in set_a]
return len(lowerCAmelCase__ ) / len(lowerCAmelCase__ )
return len(lowerCAmelCase__ ) / len(lowerCAmelCase__ )
return None
if __name__ == "__main__":
lowercase_ = {"a", "b", "c", "d", "e"}
lowercase_ = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
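    # Sanity check by hand: the intersection is {"c", "d", "e"} (3 elements) and
    # the union is {"a", "b", "c", "d", "e", "f", "h", "i"} (8 elements), so the
    # call above prints 3 / 8 = 0.375.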
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , __a , )
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = RobertaConfig
lowercase__ = '''roberta'''
def __init__( self : Any , UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCamelCase__ )
__UpperCamelCase =RobertaEmbeddings(UpperCamelCase__ )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , __a , )
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = RobertaConfig
lowercase__ = '''roberta'''
def __init__( self : int , UpperCamelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
super().__init__(UpperCamelCase__ )
__UpperCamelCase =config.num_labels
__UpperCamelCase =config.num_hidden_layers
__UpperCamelCase =DeeRobertaModel(UpperCamelCase__ )
__UpperCamelCase =nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase =nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
def UpperCAmelCase_ ( self : Tuple , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=-1 , UpperCamelCase__ : Dict=False , ) -> str:
'''simple docstring'''
__UpperCamelCase =self.num_layers
try:
__UpperCamelCase =self.roberta(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , )
__UpperCamelCase =outputs[1]
__UpperCamelCase =self.dropout(UpperCamelCase__ )
__UpperCamelCase =self.classifier(UpperCamelCase__ )
__UpperCamelCase =(logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase =e.message
__UpperCamelCase =e.exit_layer
__UpperCamelCase =outputs[0]
if not self.training:
__UpperCamelCase =entropy(UpperCamelCase__ )
__UpperCamelCase =[]
__UpperCamelCase =[]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase =MSELoss()
__UpperCamelCase =loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase =CrossEntropyLoss()
__UpperCamelCase =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__UpperCamelCase =[]
for highway_exit in outputs[-1]:
__UpperCamelCase =highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase =MSELoss()
__UpperCamelCase =loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase =CrossEntropyLoss()
__UpperCamelCase =loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase__ )
if train_highway:
__UpperCamelCase =(sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase =(loss,) + outputs
if not self.training:
__UpperCamelCase =outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase =(
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
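

# A minimal usage sketch (illustrative only; it assumes the deebert example
# package is on the path and uses an untrained, deliberately small config):
#
#     config = RobertaConfig(num_labels=2)
#     model = DeeRobertaForSequenceClassification(config)
#     model.eval()  # at inference time the output tuple also carries entropies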
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__lowercase = logging.get_logger(__name__)
def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
try:
with open(__UpperCamelCase , '''rb''' ) as flax_state_f:
__UpperCamelCase =from_bytes(__UpperCamelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(__UpperCamelCase ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Tuple , __UpperCamelCase : List[str] ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
__UpperCamelCase =flatten_dict(jax.tree_util.tree_map(lambda __UpperCamelCase : x.dtype == jnp.bfloataa , __UpperCamelCase ) ).values()
if any(__UpperCamelCase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
__UpperCamelCase =jax.tree_util.tree_map(
lambda __UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __UpperCamelCase )
__UpperCamelCase =''''''
__UpperCamelCase =flatten_dict(__UpperCamelCase , sep='''.''' )
__UpperCamelCase =pt_model.state_dict()
# keep track of unexpected & missing keys
__UpperCamelCase =[]
__UpperCamelCase =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__UpperCamelCase =flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__UpperCamelCase =flax_key_tuple_array[:-1] + ['''weight''']
__UpperCamelCase =jnp.transpose(__UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__UpperCamelCase =flax_key_tuple_array[:-1] + ['''weight''']
__UpperCamelCase =flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__UpperCamelCase =flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(__UpperCamelCase ):
__UpperCamelCase =(
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
__UpperCamelCase ='''.'''.join(__UpperCamelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
__UpperCamelCase =np.asarray(__UpperCamelCase ) if not isinstance(__UpperCamelCase , np.ndarray ) else flax_tensor
__UpperCamelCase =torch.from_numpy(__UpperCamelCase )
# remove from missing keys
missing_keys.remove(__UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__UpperCamelCase )
pt_model.load_state_dict(__UpperCamelCase )
# re-transform missing_keys to list
__UpperCamelCase =list(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(__UpperCamelCase ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
return pt_model
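

# A minimal usage sketch (illustrative; the model class and file name below are
# assumptions, not part of this module):
#
#     pt_model = MyPyTorchModel(config)  # any torch.nn.Module with a matching state dict
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")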
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
_A = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_A = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
_A = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
_A = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : Tuple ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_A = AutoTokenizer.from_pretrained(_UpperCAmelCase )
_A = FlaxBertModel.from_pretrained(_UpperCAmelCase )
_A = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase : List[str] ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def lowerCAmelCase_ ( self : List[str] ):
for model_name in ["roberta-base", "roberta-large"]:
_A = AutoTokenizer.from_pretrained(_UpperCAmelCase )
_A = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
_A = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase : Dict ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def lowerCAmelCase_ ( self : Dict ):
with self.assertRaisesRegex(
_UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
_A = FlaxAutoModel.from_pretrained('bert-base' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
_UpperCAmelCase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_A = FlaxAutoModel.from_pretrained(_UpperCAmelCase , revision='aaaaaa' )
def lowerCAmelCase_ ( self : List[Any] ):
with self.assertRaisesRegex(
_UpperCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
_A = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def lowerCAmelCase_ ( self : Tuple ):
with self.assertRaisesRegex(_UpperCAmelCase , 'Use `from_pt=True` to load this model' ):
_A = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
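

# A minimal usage sketch (hypothetical subclass; `block_class` and `block_type`
# are the only hooks a concrete test case must provide, and `DownBlock2D` is one
# of the blocks this mixin is paired with upstream):
#
#     from diffusers.models.unet_2d_blocks import DownBlock2D
#
#     class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#         block_class = DownBlock2D
#         block_type = "down"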
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
"""Test for Prim's minimum spanning tree algorithm."""
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    # each expected edge (in either direction) must appear in the returned MST
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
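

if __name__ == "__main__":
    # Convenience entry point (an assumption; the upstream suite normally runs
    # this file through pytest): execute the check directly.
    test_prim_successful_result()
    print("Prim's algorithm produced the expected spanning tree.")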
"""CTRL model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
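

# A minimal usage sketch (values are illustrative; `attribute_map` above lets the
# generic attribute names resolve to the CTRL-specific ones):
#
#     config = CTRLConfig(n_layer=2, n_head=4)
#     assert config.num_hidden_layers == 2
#     assert config.num_attention_heads == 4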
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Dict = KandinskyInpaintPipeline
lowerCAmelCase : int = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
lowerCAmelCase : Any = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
lowerCAmelCase : Optional[Any] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCAmelCase : int = False
@property
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
return 32
@property
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def UpperCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return 100
@property
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : str = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1_005 ,)
lowercase__ : List[Any] = MultilingualCLIP(_snake_case )
lowercase__ : Any = text_encoder.eval()
return text_encoder
@property
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : int = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase__ : Tuple = UNetaDConditionModel(**_snake_case )
return model
@property
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = self.dummy_text_encoder
lowercase__ : Tuple = self.dummy_tokenizer
lowercase__ : List[Any] = self.dummy_unet
lowercase__ : Any = self.dummy_movq
lowercase__ : List[Any] = DDIMScheduler(
num_train_timesteps=1_000 ,beta_schedule='''linear''' ,beta_start=0.0_0085 ,beta_end=0.012 ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,steps_offset=1 ,prediction_type='''epsilon''' ,thresholding=_snake_case ,)
lowercase__ : Dict = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ,_snake_case : int=0 ) -> str:
"""simple docstring"""
lowercase__ : Tuple = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(_snake_case ) ).to(_snake_case )
lowercase__ : Optional[int] = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(_snake_case )
# create init_image
lowercase__ : int = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_snake_case ) ).to(_snake_case )
lowercase__ : Union[str, Any] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowercase__ : List[Any] = Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
lowercase__ : Any = np.ones((64, 64) ,dtype=np.floataa )
lowercase__ : List[Any] = 0
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : str = torch.manual_seed(_snake_case )
else:
lowercase__ : List[Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : Union[str, Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = '''cpu'''
lowercase__ : List[str] = self.get_dummy_components()
lowercase__ : Optional[int] = self.pipeline_class(**_snake_case )
lowercase__ : Dict = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : List[str] = pipe(**self.get_dummy_inputs(_snake_case ) )
lowercase__ : Optional[int] = output.images
lowercase__ : Union[str, Any] = pipe(
**self.get_dummy_inputs(_snake_case ) ,return_dict=_snake_case ,)[0]
lowercase__ : List[str] = image[0, -3:, -3:, -1]
lowercase__ : Any = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowercase__ : int = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
lowercase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase__ : int = np.ones((768, 768) ,dtype=np.floataa )
lowercase__ : Tuple = 0
lowercase__ : Union[str, Any] = '''a hat'''
lowercase__ : List[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' ,torch_dtype=torch.floataa )
pipe_prior.to(_snake_case )
lowercase__ : Optional[Any] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' ,torch_dtype=torch.floataa )
lowercase__ : Any = pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
lowercase__ : int = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ , lowercase__ : Optional[Any] = pipe_prior(
_snake_case ,generator=_snake_case ,num_inference_steps=5 ,negative_prompt='''''' ,).to_tuple()
lowercase__ : Dict = pipeline(
_snake_case ,image=_snake_case ,mask_image=_snake_case ,image_embeds=_snake_case ,negative_image_embeds=_snake_case ,generator=_snake_case ,num_inference_steps=100 ,height=768 ,width=768 ,output_type='''np''' ,)
lowercase__ : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_snake_case ,_snake_case )
import unittest

from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ..utils import DummyObject, requires_backends


# Placeholder ("dummy") objects for environments where `torch` is not installed.
# The source dump collapsed every class in this file to the same obfuscated name
# and every module-level helper to a single placeholder, so the per-symbol names
# are unrecoverable; one representative class and one representative function are
# kept here under hypothetical names. Each simply raises a helpful error via
# `requires_backends` as soon as it is constructed or called.
class TorchDummyObject(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def torch_dummy_function(*args, **kwargs):
    requires_backends(torch_dummy_function, ["torch"])
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : str , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Dict , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : str ) -> Tuple:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Any , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : int , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : str ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : str , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> int:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : str , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : int ) -> str:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : str , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Any , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : int ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : str , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ) -> str:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : str ) -> Any:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Any ) -> str:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Dict ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : int , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : int , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Dict , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Any , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : int ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Any , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Dict ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Dict , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : int , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class __lowerCamelCase ( metaclass=UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def a ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> List[Any]:
requires_backends(cls , ["torch"] )
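# The placeholder classes above follow the transformers "dummy objects" pattern:
# when PyTorch is absent, every torch-backed public symbol is replaced by a stub
# whose constructor and classmethods (typically from_config and from_pretrained
# in the real files; the names here are minified) immediately call
# requires_backends, which raises an error naming the missing backend. This keeps
# `import transformers` working in torch-free environments while failing loudly
# only when a torch-dependent object is actually instantiated.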
| 702
|
UpperCamelCase = 9.80_665  # standard gravity, in m/s^2
def _A ( fluid_density : float , volume : float , gravity : float = UpperCamelCase ):
"""simple docstring"""
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
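    # Worked example (illustrative values, not part of the original module):
    # a fully submerged 0.5 m^3 object in fresh water (~997 kg/m^3) displaces
    # 997 * 9.80665 * 0.5 ≈ 4888.6 N of buoyant force, per Archimedes' principle.
    print(_A(fluid_density=997, volume=0.5))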
| 125
| 0
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
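# Example invocation (hypothetical script name; the CLI flags come from
# InitializationArguments in the accompanying arguments.py):
#
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model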
| 53
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=3_6 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self : Optional[int] ) -> Dict:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : Any ) -> Union[str, Any]:
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self : Dict ) -> List[Any]:
        config = self.get_config()
        config.vocab_size = 3_0_0  # enlarge the vocabulary for pipeline tests
        return config
    def prepare_config_and_inputs_for_decoder( self : Optional[int] ) -> Union[str, Any]:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Tuple:
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self : Tuple ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest ( ModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False  # MRA does not output attentions
    all_generative_model_classes = ()
    def setUp( self : List[Any] ) -> Optional[Any]:
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=3_7 )
    def test_config( self : Tuple ) -> List[str]:
        self.config_tester.run_common_tests()
    def test_model( self : Optional[int] ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self : int ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self : Any ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self : List[str] ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self : Dict ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self : Dict ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self : Tuple ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self : Optional[int] ) -> Optional[int]:
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='MRA does not output attentions' )
    def test_attention_outputs( self : Optional[int] ) -> Tuple:
        return
@require_torch
class MraModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head( self : Optional[Any] ) -> List[str]:
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
        input_ids = torch.arange(2_5_6 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 2_5_6, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_masked_lm( self : int ) -> Optional[int]:
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
        input_ids = torch.arange(2_5_6 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 5_0_2_6_5
        expected_shape = torch.Size((1, 2_5_6, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_masked_lm_long_input( self : Any ) -> List[str]:
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
        input_ids = torch.arange(4_0_9_6 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 5_0_2_6_5
        expected_shape = torch.Size((1, 4_0_9_6, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
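# The integration tests above follow the standard transformers recipe: load a
# pinned checkpoint, feed a deterministic input (torch.arange), check the full
# output shape, then compare a small corner of the tensor (output[:, :3, :3])
# against hard-coded values with atol=1e-4 so the test tolerates minor
# numerical drift across hardware and library versions.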
| 53
| 1
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self : Any ):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts( self : List[str] , tokenizer : List[str] ):
        '''simple docstring'''
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text
    def test_full_tokenizer( self : str ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_chinese( self : str ):
        '''simple docstring'''
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
    def test_basic_tokenizer_lower( self : int ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_lower_strip_accents_false( self : List[str] ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
    def test_basic_tokenizer_lower_strip_accents_true( self : Any ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_lower_strip_accents_default( self : Union[str, Any] ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_no_lower( self : Union[str, Any] ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self : Optional[int] ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self : str ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_respects_never_split_tokens( self : Optional[Any] ):
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
    def test_wordpiece_tokenizer( self : Optional[Any] ):
        '''simple docstring'''
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
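        # Note how the lookup behaves above: a token containing any
        # out-of-vocabulary piece ("unwantedX") falls back to [UNK] as a
        # whole word rather than being partially tokenized.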
@require_torch
    def test_prepare_batch( self : int ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text , padding=True , return_tensors="""pt""" )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace( self : Any ):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control( self : Tuple ):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation( self : int ):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
    def test_sequence_builders( self : str ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        # 102 is the separator token id in this vocabulary
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
| 547
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput ( BaseOutput ):
    sample: torch.FloatTensor
class TransformerTemporalModel ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self : Tuple , num_attention_heads : int = 16 , attention_head_dim : int = 88 , in_channels : Optional[int] = None , out_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 32 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , activation_fn : str = "geglu" , norm_elementwise_affine : bool = True , double_self_attention : bool = True , ):
        '''simple docstring'''
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        # 1. Normalize and project the per-frame input
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1E-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 2. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        # 3. Project back to the input channel count
        self.proj_out = nn.Linear(inner_dim , in_channels )
    def forward( self : Any , hidden_states : Optional[Any] , encoder_hidden_states : str = None , timestep : List[Any] = None , class_labels : List[Any] = None , num_frames : int = 1 , cross_attention_kwargs : Union[str, Any] = None , return_dict : bool = True , ):
        '''simple docstring'''
        # 1. Input: fold the frame axis out of the batch dimension
        batch_frames , channel , height , width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output: restore the original (batch*frames, C, H, W) layout
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , channel , num_frames )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
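# Minimal usage sketch (shapes are illustrative, not from the original module):
# the block attends over the frame axis independently at each spatial location,
# so the output keeps the input shape.
#
#   model = TransformerTemporalModel(in_channels=32)
#   sample = torch.randn(2 * 8, 32, 16, 16)   # batch_size=2, num_frames=8
#   out = model(sample, num_frames=8).sample  # -> (16, 32, 16, 16)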
| 547
| 1
|
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 5_1_2,
}
class JukeboxTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=5_12 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs , ) -> Tuple:
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file , encoding='''utf-8''' ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file , encoding='''utf-8''' ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file , encoding='''utf-8''' ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
        # In v2, we had n_vocab=80; in v3 we missed + and so have n_vocab=79 characters.
        if len(self.lyrics_encoder ) == 79:
            oov = oov.replace(R'''\-\'''' , R'''\-+\'''' )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
    def vocab_size( self ) -> Tuple:
        return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
    def get_vocab( self ) -> Optional[int]:
        # dict() cannot take three positional mappings; return a merged view instead
        return {**self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder}
    def _convert_token_to_id( self , list_artists , list_genres , list_lyrics ) -> Any:
        artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize( self , lyrics ) -> Optional[Any]:
        return list(lyrics )
    def tokenize( self , artist , genre , lyrics , **kwargs ) -> Any:
        artist, genre, lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def prepare_for_tokenization( self , artists , genres , lyrics , is_split_into_words = False ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + '''.v2'''
                genres[idx] = [
                    self._normalize(genre ) + '''.v2''' for genre in genres[idx].split('''_''' )
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
            vocab = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
            self.vocab['''<unk>'''] = 0
            self.n_vocab = len(vocab ) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ''''''
        else:
            self.out_of_vocab = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
        lyrics = self._run_strip_accents(lyrics )
        lyrics = lyrics.replace('''\\''' , '''\n''' )
        lyrics = self.out_of_vocab.sub('''''' , lyrics ), [], []
        return artists, genres, lyrics
    def _run_strip_accents( self , text ) -> str:
        text = unicodedata.normalize('''NFD''' , text )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )
    def _normalize( self , text ) -> str:
        accepted = (
            [chr(i ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
            + [chr(i ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
            + [chr(i ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
            + ['''.''']
        )
        accepted = frozenset(accepted )
        pattern = re.compile(R'''_+''' )
        text = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
        text = pattern.sub('''_''' , text ).strip('''_''' )
        return text
    def convert_lyric_tokens_to_string( self , lyrics ) -> str:
        return " ".join(lyrics )
    def convert_to_tensors( self , inputs , tensor_type = None , prepend_batch_axis = False ) -> int:
        if not isinstance(tensor_type , TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                '''Unable to create tensor, you should probably activate truncation and/or padding '''
                '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
        return inputs
def __call__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="" , UpperCamelCase_="pt" ) -> BatchEncoding:
__lowercase : Dict = [0, 0, 0]
__lowercase : str = [artist] * len(self.version )
__lowercase : Optional[int] = [genres] * len(self.version )
__lowercase : Union[str, Any] = self.tokenize(A__ , A__ , A__ )
__lowercase : Any = self._convert_token_to_id(A__ , A__ , A__ )
__lowercase : str = [-INFINITY] * len(full_tokens[-1] )
__lowercase : Optional[Any] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A__ )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(A__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase : Optional[Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(A__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A__ ) )
__lowercase : List[Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(A__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A__ ) )
__lowercase : Optional[Any] = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(A__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A__ ) )
return (artists_file, genres_file, lyrics_file)
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
__lowercase : Tuple = self.artists_decoder.get(A__ )
__lowercase : Any = [self.genres_decoder.get(A__ ) for genre in genres_index]
__lowercase : Any = [self.lyrics_decoder.get(A__ ) for character in lyric_index]
return artist, genres, lyrics
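# The `convert_to_tensors` dispatch above lazily imports whichever framework the
# caller asks for. A minimal standalone sketch of the same pattern follows; the
# function name and the "np" default are illustrative, not part of the class.
import numpy as np


def to_framework_tensor(inputs, tensor_type="np"):
    """Pick a tensor constructor based on the requested framework and convert."""
    if tensor_type == "pt":
        import torch  # assumed available

        return torch.tensor(inputs)
    if tensor_type == "tf":
        import tensorflow as tf  # assumed available

        return tf.constant(inputs)
    if tensor_type == "jax":
        import jax.numpy as jnp  # assumed available

        return jnp.array(inputs)
    return np.asarray(inputs)


# usage: a rectangular batch converts cleanly; ragged lists would fail to stack,
# mirroring the ValueError branch above
print(to_framework_tensor([[1, 2, 3], [4, 5, 6]]))  # -> 2x3 numpy array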
| 76
|
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = LxmertTokenizer
__UpperCamelCase = LxmertTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def __lowerCAmelCase ( self : str ) -> str:
'''simple docstring'''
super().setUp()
a__ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : int , A__ : int ) -> int:
'''simple docstring'''
a__ : List[Any] = '''UNwant\u00E9d,running'''
a__ : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = self.tokenizer_class(self.vocab_file )
a__ : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [7, 4, 5, 10, 8, 9] )
def __lowerCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__ : Union[str, Any] = self.get_tokenizer()
a__ : Union[str, Any] = self.get_rust_tokenizer()
a__ : str = '''I was born in 92000, and this is falsé.'''
a__ : Tuple = tokenizer.tokenize(A__ )
a__ : Tuple = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
a__ : Optional[int] = tokenizer.encode(A__ , add_special_tokens=A__ )
a__ : Optional[Any] = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
a__ : List[str] = self.get_rust_tokenizer()
a__ : str = tokenizer.encode(A__ )
a__ : int = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
| 688
| 0
|
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += ' '
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
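# For illustration, a quick check of the function above; the expected string is
# worked out by hand (note the trailing space appended after every entry):
# print(fizz_buzz(1, 15))
# -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "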
| 719
|
'''simple docstring'''
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
A_ = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
A_ = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {'BertModelTest': 'BertModelTester'}
lowerCamelCase_ = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowerCamelCase_ = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowerCamelCase_ = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
| 384
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
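# The `_LazyModule` wiring above defers every heavy import until a symbol is
# first accessed. A deliberately simplified analogue of the idea (my own
# minimal version; the real `_LazyModule` also handles `__dir__`, pickling,
# and `module_spec`):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        # resolve the submodule on first attribute access only
        module_name = self._symbol_to_module[symbol]
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, symbol)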
| 52
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["OwlViTFeatureExtractor"]
SCREAMING_SNAKE_CASE__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631
| 0
|
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    # swap the two elements if they are out of order for the given direction
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
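# Bitonic sort only works on power-of-two lengths: every merge step splits the
# range into two equal halves. A quick check with an 8-element list:
# data = [12, 42, -21, 17, 23, 18, 9, -5]
# bitonic_sort(data, 0, len(data), 1)
# print(data)  # [-21, -5, 9, 12, 17, 18, 23, 42]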
| 701
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _A ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : str ) -> str:
__snake_case = tempfile.mkdtemp()
__snake_case = BlipImageProcessor()
__snake_case = GPT2Tokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
__snake_case = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
__snake_case = InstructBlipProcessor(A_ , A_ , A_ )
processor.save_pretrained(self.tmpdirname )
def lowercase ( self : Tuple , **A_ : str ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).tokenizer
def lowercase ( self : Union[str, Any] , **A_ : Tuple ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor
def lowercase ( self : Union[str, Any] , **A_ : Tuple ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).qformer_tokenizer
def lowercase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def lowercase ( self : Optional[int] ) -> Tuple:
__snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__snake_case = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self : int ) -> Dict:
__snake_case = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__snake_case = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
__snake_case = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
self.assertIsInstance(processor.qformer_tokenizer , A_ )
def lowercase ( self : int ) -> str:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_qformer_tokenizer()
__snake_case = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
__snake_case = self.prepare_image_inputs()
__snake_case = image_processor(A_ , return_tensors='''np''' )
__snake_case = processor(images=A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase ( self : List[str] ) -> Optional[int]:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_qformer_tokenizer()
__snake_case = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
__snake_case = '''lower newer'''
__snake_case = processor(text=A_ )
__snake_case = tokenizer(A_ , return_token_type_ids=A_ )
__snake_case = qformer_tokenizer(A_ , return_token_type_ids=A_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def lowercase ( self : List[str] ) -> int:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_qformer_tokenizer()
__snake_case = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
__snake_case = '''lower newer'''
__snake_case = self.prepare_image_inputs()
__snake_case = processor(text=A_ , images=A_ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def lowercase ( self : str ) -> Union[str, Any]:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_qformer_tokenizer()
__snake_case = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
__snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case = processor.batch_decode(A_ )
__snake_case = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def lowercase ( self : int ) -> List[str]:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_qformer_tokenizer()
__snake_case = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
__snake_case = '''lower newer'''
__snake_case = self.prepare_image_inputs()
__snake_case = processor(text=A_ , images=A_ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 93
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCamelCase_ : List[str] = logging.get_logger(__name__)
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
def __init__( self : Any , *_snake_case : int , **_snake_case : Tuple ) -> None:
"""simple docstring"""
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead." , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 115
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase_ : Dict = None
UpperCamelCase_ : int = logging.get_logger(__name__)
UpperCamelCase_ : str = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase_ : List[Any] = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ : Optional[Any] = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
UpperCamelCase_ : str = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = ["input_ids", "attention_mask"]
snake_case = MBartTokenizer
snake_case = []
snake_case = []
def __init__( self : List[str] , _snake_case : Tuple=None , _snake_case : int=None , _snake_case : List[Any]="<s>" , _snake_case : Tuple="</s>" , _snake_case : str="</s>" , _snake_case : List[Any]="<s>" , _snake_case : Dict="<unk>" , _snake_case : str="<pad>" , _snake_case : Any="<mask>" , _snake_case : int=None , _snake_case : Optional[int]=None , _snake_case : Any=None , **_snake_case : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
A_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
vocab_file=_snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , additional_special_tokens=_snake_case , **_snake_case , )
A_ = vocab_file
A_ = False if not self.vocab_file else True
A_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
A_ = {
lang_code: self.convert_tokens_to_ids(_snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A_ = src_lang if src_lang is not None else "en_XX"
A_ = self.convert_tokens_to_ids(self._src_lang )
A_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase__ ( self : Dict ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCamelCase__ ( self : Tuple , _snake_case : str ) -> None:
"""simple docstring"""
A_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__ ( self : Optional[Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__ ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : List[Any] , _snake_case : str , _snake_case : str , _snake_case : Optional[str] , _snake_case : Optional[str] , **_snake_case : Optional[int] ) -> str:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
A_ = src_lang
A_ = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case )
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = tgt_lang_id
return inputs
def lowerCamelCase__ ( self : Dict , _snake_case : List[str] , _snake_case : str = "en_XX" , _snake_case : Optional[List[str]] = None , _snake_case : str = "ro_RO" , **_snake_case : str , ) -> BatchEncoding:
"""simple docstring"""
A_ = src_lang
A_ = tgt_lang
return super().prepare_seq2seq_batch(_snake_case , _snake_case , **_snake_case )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self : Tuple , _snake_case : List[str] ) -> None:
"""simple docstring"""
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
A_ = self.convert_ids_to_tokens(self.prefix_tokens )
A_ = self.convert_ids_to_tokens(self.suffix_tokens )
A_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase__ ( self : List[str] , _snake_case : str ) -> None:
"""simple docstring"""
A_ = self.convert_tokens_to_ids(_snake_case )
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
A_ = self.convert_ids_to_tokens(self.prefix_tokens )
A_ = self.convert_ids_to_tokens(self.suffix_tokens )
A_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : str , _snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
A_ = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
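# A usage sketch of the language-code plumbing above; the checkpoint name is
# the real facebook/mbart-large-en-ro, the sample sentence is illustrative and
# assumes the checkpoint can be downloaded.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
inputs = tokenizer("UN chief says there is no military solution in Syria", return_tensors="pt")
# set_src_lang_special_tokens appends </s> plus the source language code
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist()[-2:]))  # ['</s>', 'en_XX']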
| 115
| 1
|
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    '''simple docstring'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
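# This is Project Euler 25: the index of the first Fibonacci term with n
# digits. A sanity check worked out by hand from 1, 1, 2, 3, 5, 8, 13, ...:
# print(solution(3))  # 12, since F(12) = 144 is the first three-digit term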
| 711
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 310
| 0
|
import math


def perfect_square(num: int) -> bool:
    # float-based check; math.sqrt rounds for very large integers
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    # exact integer binary search for a square root
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
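# The float-based check can misreport for very large integers because
# math.sqrt rounds to double precision; the binary-search version uses exact
# integer arithmetic throughout. A hypothetical comparison:
# n = (10**8 + 1) ** 2
# perfect_square_binary_search(n)  # True
# perfect_square(n) may return False here since math.sqrt(n) is rounded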
| 307
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
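# A hypothetical direct call with placeholder paths (equivalent to running the
# script from the command line with the flags defined above):
# convert_tf_checkpoint_to_pytorch(
#     tf_checkpoint_path="./bigbird/model.ckpt",
#     big_bird_config_file="./bigbird/config.json",
#     pytorch_dump_path="./bigbird-pytorch",
#     is_trivia_qa=True,
# )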
| 307
| 1
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = TransfoXLTokenizer
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def lowercase_ ( self : Tuple ) ->int:
super().setUp()
snake_case__ : Any = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
snake_case__ : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self : str, **_snake_case : str ) ->Optional[Any]:
snake_case__ : Optional[int] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **_snake_case )
def lowercase_ ( self : Tuple, _snake_case : Optional[int] ) ->List[Any]:
snake_case__ : Union[str, Any] = '<unk> UNwanted , running'
snake_case__ : Any = '<unk> unwanted, running'
return input_text, output_text
def lowercase_ ( self : Union[str, Any] ) ->Optional[int]:
snake_case__ : int = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=_snake_case )
snake_case__ : Union[str, Any] = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(_snake_case, ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ), [0, 4, 8, 7] )
def lowercase_ ( self : int ) ->Any:
snake_case__ : str = TransfoXLTokenizer(lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ), ['hello', '!', 'how', 'are', 'you', '?'] )
def lowercase_ ( self : str ) ->List[str]:
snake_case__ : List[str] = TransfoXLTokenizer(lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowercase_ ( self : Optional[Any] ) ->List[Any]:
snake_case__ : List[Any] = TransfoXLTokenizer(lower_case=_snake_case )
snake_case__ : Union[str, Any] = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
snake_case__ : Optional[Any] = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(_snake_case ), _snake_case )
self.assertEqual(tokenizer.convert_tokens_to_string(_snake_case ), _snake_case )
def lowercase_ ( self : Optional[Any] ) ->List[str]:
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : Optional[Any] = len(_snake_case )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1', 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_snake_case ), original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ), [1] )
self.assertEqual(tokenizer.decode([1] ), 'new1' )
| 243
|
from math import sqrt


def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
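# This is Project Euler 21: sum all amicable numbers below the limit. The
# classic pair is 220 and 284 (d(220) = 284 and d(284) = 220), which the
# helper reproduces:
# print(sum_of_divisors(220))  # 284
# print(sum_of_divisors(284))  # 220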
| 243
| 1
|
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    '''simple docstring'''
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 359
|
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    '''simple docstring'''
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    # Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("""Input Error: Molar mass values must be greater than 0.""")
    )


def first_effusion_rate(
    effusion_rate: float, molar_mass_1: float, molar_mass_2: float
) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""")
    )


def second_effusion_rate(
    effusion_rate: float, molar_mass_1: float, molar_mass_2: float
) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""")
    )


def first_molar_mass(
    molar_mass: float, effusion_rate_1: float, effusion_rate_2: float
) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""")
    )


def second_molar_mass(
    molar_mass: float, effusion_rate_1: float, effusion_rate_2: float
) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""")
    )
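# A worked example, assuming hydrogen (M ~ 2.016 g/mol) and oxygen
# (M ~ 32.0 g/mol); the value is computed from the formula, not taken from
# the source: rate_H2 / rate_O2 = sqrt(32.0 / 2.016) ~ 3.984
# print(effusion_ratio(2.016, 32.0))  # ~3.984, hydrogen effuses ~4x faster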
| 359
| 1
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class a :
def __init__( self : Tuple , __lowerCAmelCase : str = None , __lowerCAmelCase : uuid.UUID = None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Tuple=None ):
if not conversation_id:
_UpperCAmelCase = uuid.uuid4()
if past_user_inputs is None:
_UpperCAmelCase = []
if generated_responses is None:
_UpperCAmelCase = []
_UpperCAmelCase = conversation_id
_UpperCAmelCase = past_user_inputs
_UpperCAmelCase = generated_responses
_UpperCAmelCase = text
def __eq__( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : bool = False ):
if self.new_user_input:
if overwrite:
logger.warning(
f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
f'''with: "{text}".''' )
_UpperCAmelCase = text
else:
logger.warning(
f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
_UpperCAmelCase = text
def lowerCAmelCase_ ( self : int ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_UpperCAmelCase = None
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : str ):
self.generated_responses.append(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Optional[int] ):
_UpperCAmelCase = f'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
_UpperCAmelCase = """user""" if is_user else """bot"""
output += f'''{name} >> {text} \n'''
return output
@add_end_docstrings(
lowerCAmelCase_ , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class a ( lowerCAmelCase_ ):
def __init__( self : List[Any] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if self.tokenizer.pad_token_id is None:
_UpperCAmelCase = self.tokenizer.eos_token
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str ):
_UpperCAmelCase = {}
_UpperCAmelCase = {}
_UpperCAmelCase = {}
if min_length_for_response is not None:
_UpperCAmelCase = min_length_for_response
if minimum_tokens is not None:
_UpperCAmelCase = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCAmelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__lowerCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , __lowerCAmelCase : Union[Conversation, List[Conversation]] , __lowerCAmelCase : int=0 , **__lowerCAmelCase : List[str] ):
_UpperCAmelCase = super().__call__(__lowerCAmelCase , num_workers=__lowerCAmelCase , **__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Conversation , __lowerCAmelCase : Optional[int]=32 ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
_UpperCAmelCase = self.tokenizer._build_conversation_input_ids(__lowerCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCAmelCase = self._legacy_parse_and_tokenize(__lowerCAmelCase )
if self.framework == "pt":
_UpperCAmelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
_UpperCAmelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=10 , **__lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
_UpperCAmelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
_UpperCAmelCase = max_length - minimum_tokens
_UpperCAmelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCAmelCase = model_inputs["""attention_mask"""][:, -trim:]
_UpperCAmelCase = model_inputs.pop("""conversation""" )
_UpperCAmelCase = max_length
_UpperCAmelCase = self.model.generate(**__lowerCAmelCase , **__lowerCAmelCase )
if self.model.config.is_encoder_decoder:
_UpperCAmelCase = 1
else:
_UpperCAmelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=True ):
_UpperCAmelCase = model_outputs["""output_ids"""]
_UpperCAmelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
_UpperCAmelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__lowerCAmelCase )
return conversation
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Conversation ):
_UpperCAmelCase = self.tokenizer.eos_token_id
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) )
if len(__lowerCAmelCase ) > self.tokenizer.model_max_length:
_UpperCAmelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
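# A minimal usage sketch, assuming the conversational checkpoint
# microsoft/DialoGPT-small is available locally or downloadable:
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("What's the best way to learn Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])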
| 704
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a ( lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Union[str, Any] = 'nat'
_snake_case : List[str] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[Any] , __lowerCAmelCase : int=4 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Dict=64 , __lowerCAmelCase : int=[3, 4, 6, 5] , __lowerCAmelCase : List[str]=[2, 4, 8, 16] , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : List[str]=3.0 , __lowerCAmelCase : int=True , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : Union[str, Any]=1e-5 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Optional[Any] , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(__lowerCAmelCase )
_UpperCAmelCase = num_heads
_UpperCAmelCase = kernel_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(__lowerCAmelCase ) - 1) )
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__lowerCAmelCase ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
| 275
| 0
|
import warnings
from ..trainer import Trainer
from ..utils import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase=None, **lowerCamelCase) -> int:
"""simple docstring"""
warnings.warn(
'`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
'instead.', lowerCamelCase, )
super().__init__(args=lowerCamelCase, **lowerCamelCase)
| 89
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
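# The tests above treat every processor as a callable with the signature
# (input_ids, scores, cur_len) -> scores. A minimal sketch of a custom processor
# that would compose with FlaxLogitsProcessorList the same way (illustrative
# class name, not part of the library; input_ids/cur_len are unused but kept
# for interface conformity):
class FlaxBlockTokenLogitsProcessor:
    """Sets one token's logit to -inf so it can never be sampled."""

    def __init__(self, blocked_token_id: int):
        self.blocked_token_id = blocked_token_id

    def __call__(self, input_ids, scores, cur_len: int):
        return scores.at[:, self.blocked_token_id].set(-jnp.inf)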
| 29
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
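# _LazyModule defers the heavy framework imports above until an attribute is first
# touched. A self-contained sketch of the same mechanism (hypothetical class, not
# the transformers implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f".{module_name}", self.__name__), attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value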
| 710
|
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
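# A hedged usage sketch of the API re-exported above: Accelerator.prepare adapts
# the model, optimizer, and dataloader to the current device setup, and
# accelerator.backward replaces the usual loss.backward(). Everything below is
# illustrative, not part of this module.
import torch
from torch.utils.data import DataLoader, TensorDataset


def _demo_training_step():
    accelerator = Accelerator()
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    dataloader = DataLoader(TensorDataset(torch.randn(8, 4), torch.randn(8, 1)), batch_size=4)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for inputs, targets in dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()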
| 307
| 0
|
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 58
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [
            (x, self.encoder_decoder_mask(decoder_mask, y_mask)) for x, y_mask in encodings_and_masks
        ]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style layer norm: scale only, no bias and no subtraction of the mean;
        # the variance is accumulated in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
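# A hedged smoke test for the decoder above with toy shapes; the encoder output is
# faked with random tensors, and exact behaviour depends on the diffusers
# Attention and get_timestep_embedding internals. Names are illustrative only.
def _demo_film_decoder():
    decoder = T5FilmDecoder(input_dims=8, targets_length=16, d_model=32, num_layers=1, num_heads=2, d_kv=16, d_ff=64)
    batch = 2
    tokens = torch.randn(batch, 16, 8)  # continuous spectrogram frames
    noise_time = torch.rand(batch)  # diffusion time in [0, 1)
    encodings = torch.randn(batch, 10, 32)  # fake encoder hidden states
    enc_mask = torch.ones(batch, 10)  # fake encoder attention mask
    out = decoder(
        encodings_and_masks=[(encodings, enc_mask)],
        decoder_input_tokens=tokens,
        decoder_noise_time=noise_time,
    )
    assert out.shape == (batch, 16, 8)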
| 58
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MobileViTFeatureExtractor"]
lowercase_ = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
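# A hedged usage sketch: the config is a plain container, so constructing it and
# reading back the derived per-encoder layer counts should behave as below
# (to_dict is inherited from PretrainedConfig; the round-trip is illustrative).
def _demo_lxmert_config():
    config = LxmertConfig(hidden_size=256, l_layers=2, x_layers=1, r_layers=2)
    assert config.num_hidden_layers == {"vision": 2, "cross_encoder": 1, "language": 2}
    restored = LxmertConfig(**config.to_dict())
    assert restored.hidden_size == 256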
| 65
| 0
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
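# A hedged usage sketch of the parser above: options are declared as a dataclass
# and parsed from a known argv. The dataclass and its fields are illustrative only.
@dataclasses.dataclass
class _DemoArguments:
    learning_rate: float = HfArg(default=3e-4, help="Peak learning rate.")
    do_eval: bool = False


def _demo_hf_argument_parser():
    parser = HfArgumentParser(_DemoArguments)
    # bool fields take nargs="?" with const=True, so a bare --do_eval flips to True
    (parsed,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-3", "--do_eval"])
    assert parsed.learning_rate == 1e-3 and parsed.do_eval is True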
| 197
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
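# A hedged usage sketch of the processor above on a synthetic RGBA image; the RGBA
# input exercises the do_convert_rgb path, "np" asks BatchFeature for numpy
# tensors, and the shape assertion assumes the default 384x384 size.
def _demo_blip_image_processor():
    image = PIL.Image.new("RGBA", (500, 300), color=(255, 0, 0, 255))
    processor = BlipImageProcessor(size={"height": 384, "width": 384})
    pixel_values = processor.preprocess(image, return_tensors="np")["pixel_values"]
    assert pixel_values.shape == (1, 3, 384, 384)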
| 197
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
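# A hedged sketch of the special-token layout implemented above: a single sequence
# becomes [CLS] ids [SEP], a pair becomes [CLS] ids_0 [SEP] ids_1 [SEP], and
# token_type_ids are 0 for the first segment and 1 for the second. The helper and
# ids below are illustrative only.
def _demo_special_token_layout(tokenizer: RemBertTokenizerFast):
    ids_0, ids_1 = [11, 12], [21]
    cls, sep = tokenizer.cls_token_id, tokenizer.sep_token_id
    assert tokenizer.build_inputs_with_special_tokens(ids_0, ids_1) == [cls] + ids_0 + [sep] + ids_1 + [sep]
    assert tokenizer.create_token_type_ids_from_sequences(ids_0, ids_1) == [0, 0, 0, 0] + [1, 1]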
| 226
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 226
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
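# A sanity check tying the expected ids in test_full_tokenizer back to the vocab
# list written in setUp: "low" sits at index 14, "er</w>" at 15, and "<unk>" at 20,
# which is exactly the [14, 15, 20] the test asserts.
def _demo_vocab_indices():
    vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>",
             "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>",
             "wider</w>", "<unk>"]
    assert vocab.index("low") == 14
    assert vocab.index("er</w>") == 15
    assert vocab.index("<unk>") == 20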
| 91
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
"""simple docstring"""
def __init__( self ):
lowercase__: Any = {}
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 ):
if self.graph.get(_UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowercase__: List[Any] = [[w, v]]
if not self.graph.get(_UpperCAmelCase ):
lowercase__: Any = []
def _snake_case ( self ):
return list(self.graph )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ):
if s == d:
return []
lowercase__: str = []
lowercase__: Optional[Any] = []
if s == -2:
lowercase__: List[str] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Optional[int] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCAmelCase ) != 0:
lowercase__: List[Any] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: str = ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return visited
def _snake_case ( self , _UpperCAmelCase=-1 ):
if c == -1:
lowercase__: Union[str, Any] = floor(random() * 10000 ) + 10
for i in range(_UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__: Optional[int] = floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 )
def _snake_case ( self , _UpperCAmelCase=-2 ):
lowercase__: List[str] = deque()
lowercase__: Any = []
if s == -2:
lowercase__: List[str] = list(self.graph )[0]
d.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
while d:
lowercase__: List[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: int = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _snake_case ( self , _UpperCAmelCase ):
return len(self.graph[u] )
def _snake_case ( self , _UpperCAmelCase=-2 ):
lowercase__: Dict = []
lowercase__: int = []
if s == -2:
lowercase__: int = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Optional[int] = s
lowercase__: Any = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_UpperCAmelCase ) != 0:
lowercase__: str = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: Optional[Any] = ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return sorted_nodes
def _snake_case ( self ):
lowercase__: Optional[int] = []
lowercase__: str = []
lowercase__: Union[str, Any] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Optional[int] = -2
lowercase__: Optional[int] = []
lowercase__: Optional[Any] = s
lowercase__: List[str] = False
lowercase__: Dict = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(_UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: List[str] = True
if len(_UpperCAmelCase ) != 0:
lowercase__: str = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: Optional[Any] = False
indirect_parents.append(_UpperCAmelCase )
lowercase__: Union[str, Any] = s
lowercase__: List[Any] = ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return list(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = []
lowercase__: List[str] = []
lowercase__: str = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Optional[int] = -2
lowercase__: Tuple = []
lowercase__: Optional[Any] = s
lowercase__: Dict = False
lowercase__: int = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Optional[int] = len(_UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: List[str] = True
if len(_UpperCAmelCase ) != 0:
lowercase__: Union[str, Any] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: Dict = False
indirect_parents.append(_UpperCAmelCase )
lowercase__: int = s
lowercase__: List[str] = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return False
def _snake_case ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ):
lowercase__: str = time()
self.dfs(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = time()
return end - begin
def _snake_case ( self , _UpperCAmelCase=-2 ):
lowercase__: List[Any] = time()
self.bfs(_UpperCAmelCase )
lowercase__: Any = time()
return end - begin
class Graph:
"""simple docstring"""
def __init__( self ):
lowercase__: Union[str, Any] = {}
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 ):
# check if the u exists
if self.graph.get(_UpperCAmelCase ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowercase__: Dict = [[w, v]]
# add the other way
if self.graph.get(_UpperCAmelCase ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
lowercase__: List[Any] = [[w, u]]
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCAmelCase )
# the other way round
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ):
if s == d:
return []
lowercase__: List[str] = []
lowercase__: List[Any] = []
if s == -2:
lowercase__: Optional[Any] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Any = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCAmelCase ) != 0:
lowercase__: Dict = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: str = ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return visited
def _snake_case ( self , _UpperCAmelCase=-1 ):
if c == -1:
lowercase__: List[str] = floor(random() * 10000 ) + 10
for i in range(_UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__: List[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 )
def _snake_case ( self , _UpperCAmelCase=-2 ):
lowercase__: Optional[int] = deque()
lowercase__: Optional[int] = []
if s == -2:
lowercase__: Optional[int] = list(self.graph )[0]
d.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
while d:
lowercase__: Tuple = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self , _UpperCAmelCase ):
return len(self.graph[u] )
def _snake_case ( self ):
lowercase__: Dict = []
lowercase__: Optional[Any] = []
lowercase__: Union[str, Any] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Union[str, Any] = -2
lowercase__: Dict = []
lowercase__: str = s
lowercase__: Tuple = False
lowercase__: Any = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(_UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: Optional[int] = True
if len(_UpperCAmelCase ) != 0:
lowercase__: Optional[Any] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: List[Any] = False
indirect_parents.append(_UpperCAmelCase )
lowercase__: List[Any] = s
lowercase__: Union[str, Any] = ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return list(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: str = []
lowercase__: List[str] = []
lowercase__: str = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase__: Optional[Any] = -2
lowercase__: List[str] = []
lowercase__: List[str] = s
lowercase__: str = False
lowercase__: List[str] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Union[str, Any] = len(_UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: int = True
if len(_UpperCAmelCase ) != 0:
lowercase__: List[Any] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase__: int = False
indirect_parents.append(_UpperCAmelCase )
lowercase__: Union[str, Any] = s
lowercase__: List[Any] = ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return False
def _snake_case ( self ):
return list(self.graph )
def _snake_case ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ):
lowercase__: Optional[Any] = time()
self.dfs(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = time()
return end - begin
def _snake_case ( self , _UpperCAmelCase=-2 ):
lowercase__: List[Any] = time()
self.bfs(_UpperCAmelCase )
lowercase__: int = time()
return end - begin
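# The two graph classes above keep adjacency lists as {node: [[weight, neighbor], ...]}.
# A hedged standalone sketch of the same representation with an iterative DFS over
# it (an independent helper, not a method of the classes above):
def iterative_dfs(graph, start):
    """Return nodes in the order an iterative depth-first traversal visits them."""
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.append(node)
        # push weighted neighbors; each adjacency entry is [weight, neighbor]
        stack.extend(neighbor for _, neighbor in reversed(graph.get(node, [])))
    return visited


# example adjacency list matching the layout the classes maintain
_demo_graph = {1: [[1, 2], [1, 3]], 2: [[1, 4]], 3: [], 4: []}
assert iterative_dfs(_demo_graph, 1) == [1, 2, 4, 3]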
| 586
| 0
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
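

# A quick sketch (illustrative only, values chosen to match the defaults above) of the
# past_key_values dummies this config builds for ONNX export: one (key, value) pair of
# zeros per layer, each of shape (batch, num_heads, past_sequence_length, head_dim).
if __name__ == "__main__":
    import torch

    batch, num_heads, past_len, head_dim = 2, 16, 10, 4096 // 16
    past_shape = (batch, num_heads, past_len, head_dim)
    past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(28)]
    print(len(past_key_values), past_key_values[0][0].shape)  # 28 layers, torch.Size([2, 16, 10, 256])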
| 354
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
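

# Why `default_factory` instead of a plain default: dataclasses forbid mutable defaults
# like `[]`, so `list_field` wraps the value in a factory. A tiny illustration (not part
# of the original file):
#
#     @dataclass
#     class _Demo:
#         xs: List[int] = list_field(default=[8])
#
#     _Demo().xs == [8] and _Demo().xs is not _Demo().xs  # each instance gets a fresh list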
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].`"
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 354
| 1
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
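

# The regression pattern used above, in isolation: compare a small fixed slice of the
# generated image against stored reference values. A minimal sketch (helper name and
# slice location are illustrative, not from the original test):
def _check_image_slice(image, expected_slice, tol=1e-2):
    import numpy as np

    image_slice = image[0, -3:, -3:, -1]
    return bool(np.abs(image_slice.flatten() - np.asarray(expected_slice)).max() < tol)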
| 82
|
"""simple docstring"""
def print_max_activities(start, finish):
    """Prints a maximum-size set of activities that a single person can perform,
    one at a time, assuming ``finish`` is sorted in ascending order."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE = [1, 3, 0, 5, 8, 5]
__SCREAMING_SNAKE_CASE = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
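

# A variant of the same greedy selection that returns the chosen indices instead of
# printing them (a sketch, not part of the original file):
def max_activities(start, finish):
    selected = [0]  # the first activity is always selected
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected


# e.g. max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) -> [0, 1, 3, 4]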
| 553
| 0
|
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
    """
    Will split the string up into all the values separated by the separator
    (defaults to spaces).
    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    """
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
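

# Note one difference from str.split (illustrative):
#     split("a,b,c", separator=",") -> ['a', 'b', 'c']  (same as "a,b,c".split(","))
#     split("a,b,", separator=",")  -> ['a', 'b']       (str.split would give ['a', 'b', ''])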
| 342
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always encode to a fixed sequence length so candidate batches can be stacked.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
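

# Sketch of what `create_token_type_ids_from_sequences` produces for a sentence pair
# (illustrative values only): zeros over `[CLS] A [SEP]`, ones over `B [SEP]`.
#
#     ids_a, ids_b = [5, 6], [7, 8, 9]
#     token_type_ids = [0] * (1 + len(ids_a) + 1) + [1] * (len(ids_b) + 1)
#     # -> [0, 0, 0, 0, 1, 1, 1, 1]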
| 342
| 1
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
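

# Basic usage of the reader under test, outside pytest (a sketch; `my.txt` is a
# placeholder path):
#
#     from datasets.io.text import TextDatasetReader
#     dataset = TextDatasetReader("my.txt", cache_dir="cache").read()
#     print(dataset.column_names)  # ['text']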
| 484
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculates the speed of sound in a fluid from its density and bulk modulus,
    using the Newton-Laplace equation: c = sqrt(K / rho).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
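

# Worked example (approximate textbook values, assumed here): water has a bulk modulus
# of about 2.15e9 Pa and a density of about 998 kg/m^3, giving
# (2.15e9 / 998) ** 0.5 ≈ 1468 m/s, close to the accepted ~1480 m/s.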
| 613
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 700
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
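
# Example (illustrative): `python utils/get_modified_files.py utils src tests examples`
# builds the regex r"^(utils|src|tests|examples).*?\.py$", so a diff containing
# `src/foo.py` and `docs/bar.py` prints only `src/foo.py`.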
| 336
| 0
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 418
|
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(
    variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False
):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
)
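
# The head-flattening done in `t5x_attention_lookup`, in isolation: T5X stores attention
# kernels as (d_model, num_heads, head_dim); the PyTorch side expects
# (d_model, num_heads * head_dim). A small numpy sketch with toy sizes:
#
#     import numpy as np
#     kernel = np.zeros((512, 8, 64))            # (d_model, heads, head_dim)
#     flat = kernel.reshape(kernel.shape[0], kernel.shape[1] * kernel.shape[2])
#     flat.shape                                  # (512, 512)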
| 677
| 0
|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Checks whether n is a 9-digit pandigital number (uses each of 1-9 exactly once)."""
    s = str(n)
    return len(s) == 9 and set(s) == set('''123456789''')


def solution():
    """
    Finds the largest 1-9 pandigital number formed as a concatenated product.
    A 4-digit n concatenated with 2n is n * 10**5 + 2n = 100002 * n, and a
    3-digit n concatenated with 2n and 3n is 1002003 * n.
    """
    for base_num in range(9999, 4999, -1):
        candidate = 10_0002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 100_2003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F'''{solution() = }''')
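
# Sanity check of the arithmetic used above (illustrative): for a 4-digit n, concat(n, 2n)
# has 9 digits because 2n has 5 digits once n >= 5000, and
# int(str(9267) + str(2 * 9267)) == 100002 * 9267 == 926718534.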
| 350
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '''</s>'''
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '''<pad>''')
        self.assertEqual(vocab_keys[1], '''</s>''')
        self.assertEqual(vocab_keys[-1], '''v''')
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_pegasus_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = '''To ensure a smooth flow of bank resolutions.'''
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['''This is going to be way too long.''' * 150, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='''pt''')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='''pt''')
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase,  # the expected-encoding dict defined above
            model_name='''google/bigbird-pegasus-large-arxiv''',
            revision='''ba85d0851d708441f91440d509690f1ab6353415''',
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='''[MASK]''')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['''This is going to be way too long.''' * 1000, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='''pt''')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='''pt''')
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        """Compares tokenization against reference ids produced by the original TF implementation."""
        test_str = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )

        token_ids = self._large_tokenizer(test_str).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 350
| 1
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("""Transformers CLI tool""", usage="""transformers-cli <command> [<args>]""")
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 45
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
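

# Illustration of the `hidden_size` computed above: with the default embed_dim=64 and
# four stages (len(depths) == 4), the channel dimension after the last stage is
# 64 * 2 ** 3 == 512.
#
#     config = DinatConfig()
#     assert config.hidden_size == int(64 * 2 ** (4 - 1))  # 512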
| 45
| 1
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
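

# A sketch of how this reader is typically driven (the generator name is illustrative):
#
#     def gen():
#         for i in range(4):
#             yield {"text": f"line {i}"}
#
#     ds = GeneratorDatasetInputStream(generator=gen).read()
#     print(ds.num_rows)  # 4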
| 715
|
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Counts the square laminae that use no more than `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
| 552
| 0
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    '''
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer.
    '''
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
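# Illustrative launch commands (not part of the original file; the script filename is
# hypothetical). The same script runs unchanged on one CPU/GPU, several GPUs, or TPUs:
#
#     accelerate config                                   # answer the hardware questions once
#     accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2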
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input_dict = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input_dict)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input_dict = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input_dict)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
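# A minimal sketch of exercising the model under test outside the unittest harness, using
# the same tiny configuration as `prepare_init_args_and_inputs_for_common` (shapes only):
#
#     model = PriorTransformer(num_attention_heads=2, attention_head_dim=4, num_layers=2,
#                              embedding_dim=8, num_embeddings=7, additional_embeddings=4)
#     out = model(hidden_states=torch.randn(4, 8), timestep=2,
#                 proj_embedding=torch.randn(4, 8),
#                 encoder_hidden_states=torch.randn(4, 7, 8))[0]
#     assert out.shape == (4, 8)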
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : List[Any] ):
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowercase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , SCREAMING_SNAKE_CASE )
self.assertIsInstance(encoding.boxes , SCREAMING_SNAKE_CASE )
# Test batched
lowercase__ : Union[str, Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def snake_case ( self : Tuple ):
# Initialize image_processing
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowercase__ : Dict = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def snake_case ( self : Optional[int] ):
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowercase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowercase__ : int = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def snake_case ( self : Optional[int] ):
# with apply_OCR = True
lowercase__ : str = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowercase__ : Tuple = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
lowercase__ : str = Image.open(ds[0]["file"] ).convert("RGB" )
lowercase__ : List[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase__ : str = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowercase__ : Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , SCREAMING_SNAKE_CASE )
# with apply_OCR = False
lowercase__ : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
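# Typical downstream usage of the processor exercised above (a hedged sketch; the image
# path is hypothetical):
#
#     image_processor = LayoutLMvaImageProcessor(apply_ocr=True)
#     encoding = image_processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#     # encoding.pixel_values has shape (1, 3, 224, 224); encoding.words / encoding.boxes
#     # hold the Tesseract OCR results used by LayoutLM-style models.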
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg to obtain an inpainting mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
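# A hedged usage sketch for this community pipeline (checkpoint names are illustrative and
# the `custom_pipeline` id assumes the pipeline is published under "text_inpainting"):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-inpainting",
#         custom_pipeline="text_inpainting",
#         segmentation_model=CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"),
#         segmentation_processor=CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined"),
#     )
#     result = pipe(image=init_image, text="a glass", prompt="a cup of coffee").images[0]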
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )
    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
    ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(
        self,
        hidden_states,
        key_value_states=None,
        attention_mask=None,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated-GELU feed forward: one projection is passed through the activation,
        # the other stays linear, and the two are multiplied element-wise
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
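# In formula form, the forward pass above is RMSNorm (Zhang & Sennrich, 2019):
#     y = w * x / sqrt(mean(x_i**2) + eps)
# i.e. LayerNorm with the mean-subtraction and bias removed, which is cheaper and matches
# the original T5 implementation.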
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
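# The tanh expression above is the standard approximation of exact GELU,
#     GELU(x) = x * Phi(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
# where Phi is the standard normal CDF; the approximation avoids evaluating erf().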
class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
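# FiLM (feature-wise linear modulation, Perez et al., 2018) in one line: a conditioning
# vector is projected to per-channel (scale, shift) pairs and applied as
#     y = x * (1 + scale) + shift
# The `1 +` keeps the layer close to an identity map when the `scale_bias` outputs are small.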
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')

        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a single random PIL image for the processor tests."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __snake_case ( self : Optional[Any]) -> List[Any]:
A_ = self.get_tokenizer()
A_ = self.get_image_processor()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
A_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __snake_case ( self : Union[str, Any]) -> Optional[Any]:
A_ = self.get_tokenizer()
A_ = self.get_image_processor()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
A_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
A_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
A_ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __snake_case ( self : List[Any]) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = self.prepare_image_inputs()
A_ = image_processor(_lowercase , return_tensors='np')
A_ = processor(images=_lowercase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def __snake_case ( self : Any) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = 'test'
A_ = processor(text=_lowercase)
A_ = tokenizer(_lowercase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __snake_case ( self : str) -> Dict:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = 'test'
A_ = self.prepare_image_inputs()
A_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'labels'])
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __snake_case ( self : Union[str, Any]) -> Optional[int]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.char_decode(_lowercase)
A_ = tokenizer.batch_decode(_lowercase)
A_ = [seq.replace(' ' , '') for seq in decoded_tok]
self.assertListEqual(_lowercase , _lowercase)
def __snake_case ( self : List[str]) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = None
A_ = self.prepare_image_inputs()
A_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def __snake_case ( self : List[str]) -> Any:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = torch.randn(1 , 27 , 38)
A_ = torch.randn(1 , 27 , 50_257)
A_ = torch.randn(1 , 27 , 30_522)
A_ = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
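# Net effect of the `_LazyModule` indirection: importing this package only builds the dict
# above, and the heavy torch/tf/flax imports happen on first attribute access. A minimal
# sketch of the idea (simplified, not the actual transformers implementation):
#
#     class LazyModule(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = importlib.import_module("." + self._name_to_module[name], self.__name__)
#             return getattr(submodule, name)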
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : Dict = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
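# Example: a 50x20 pixel box at (100, 40) in a 1000x500 image maps into the resolution-
# independent 0-1000 coordinate space that LayoutLM-style models expect:
#
#     normalize_box([100, 40, 150, 60], width=1000, height=500)  # -> [100, 80, 150, 120]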
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ''

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = '',
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, 'pytesseract')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)

        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, tmp_path, sqlite_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, tmp_path, sqlite_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = tmp_path / """cache"""
_A : Union[str, Any] = os.path.join(snake_case_,"""tmp.sql""" )
_A : str = SqlDatasetReader("""dataset""","""sqlite:///""" + sqlite_path,cache_dir=snake_case_ ).read()
SqlDatasetWriter(snake_case_,"""dataset""","""sqlite:///""" + output_sqlite_path,num_proc=1 ).write()
_A : Optional[int] = iter_sql_file(snake_case_ )
_A : Tuple = iter_sql_file(snake_case_ )
for rowa, rowa in zip(snake_case_,snake_case_ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = tmp_path / """cache"""
_A : Dict = os.path.join(snake_case_,"""tmp.sql""" )
_A : Any = SqlDatasetReader("""dataset""","""sqlite:///""" + sqlite_path,cache_dir=snake_case_ ).read()
SqlDatasetWriter(snake_case_,"""dataset""","""sqlite:///""" + output_sqlite_path,num_proc=2 ).write()
_A : int = iter_sql_file(snake_case_ )
_A : Optional[int] = iter_sql_file(snake_case_ )
for rowa, rowa in zip(snake_case_,snake_case_ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Union[str, Any] = tmp_path / """cache"""
_A : Union[str, Any] = os.path.join(snake_case_,"""tmp.sql""" )
_A : Dict = SqlDatasetReader("""dataset""","""sqlite:///""" + sqlite_path,cache_dir=snake_case_ ).read()
with pytest.raises(snake_case_ ):
SqlDatasetWriter(snake_case_,"""dataset""","""sqlite:///""" + output_sqlite_path,num_proc=0 ).write()
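

# Illustrative round trip (added for exposition; not part of the upstream test
# suite). `Dataset.to_sql` / `Dataset.from_sql` are the public wrappers around
# the SqlDatasetWriter / SqlDatasetReader classes exercised above; the file
# name below is hypothetical.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        uri = "sqlite:///" + os.path.join(tmp_dir, "tmp.sqlite")
        ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
        ds.to_sql("dataset", uri)  # writes through SqlDatasetWriter
        round_tripped = Dataset.from_sql("dataset", uri)  # reads through SqlDatasetReader
        assert round_tripped.column_names == ["col_1", "col_2", "col_3"]
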
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
def binary_recursive(decimal: int) -> str:
    """
    Take a positive integer value and return its binary equivalent.
    >>> binary_recursive(1000)
    '1111101000'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """
    Take an integer value (as a string) and return its binary equivalent.
    >>> main("11")
    '0b1011'
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
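

# Quick illustrative sanity check: the recursive conversion agrees with
# Python's built-in bin() for non-negative integers.
if __name__ == "__main__":
    for value in (0, 1, 5, 1000):
        assert main(str(value)) == bin(value)
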
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """
    The inverse of DDIM: it walks *forward* through the diffusion process to
    recover the noise that generated a given sample.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod.
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds.
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in this case, self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # Interchangeability with other schedulers requires this hook; it is a no-op here.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the "previous" timestep, which for the inverse process is t+1
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
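

# Minimal usage sketch (illustrative; a real pipeline would plug a UNet's noise
# prediction in place of the zero tensor below). The inverse scheduler walks
# *forward* through the diffusion process, mapping a sample at `timestep` to
# the sample one training-step later.
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # placeholder for a model output
        sample = scheduler.step(noise_pred, t, sample).prev_sample
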
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
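

# Illustrative usage (the config is normally loaded through
# `AutoConfig.from_pretrained("facebook/xlm-roberta-xl")`; instantiating it
# directly yields the same defaults):
#
#   from transformers import XLMRobertaXLConfig
#
#   config = XLMRobertaXLConfig()
#   print(config.hidden_size, config.num_hidden_layers)  # 2560 36
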
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """
    Implements the rectified linear unit (ReLU) activation function,
    which replaces every negative entry with 0.
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
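

# ReLU is applied element-wise, so the same helper also works on
# multi-dimensional arrays (illustrative):
if __name__ == "__main__":
    batch = np.array([[-2.0, 3.5], [0.0, -0.1]])
    print(relu(batch))  # --> [[0.  3.5] [0.  0. ]]
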
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
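
# Illustrative behaviour of the sorting helper (constants first, then classes,
# then functions, with underscores ignored when comparing):
#
#   >>> sort_objects(["load_model", "MODEL_NAMES", "AutoModel", "Trainer", "pipeline"])
#   ['MODEL_NAMES', 'AutoModel', 'Trainer', 'load_model', 'pipeline']
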
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    r"""Pipeline for unconditional image generation with the score-based SDE-VE sampler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
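

# Usage sketch (illustrative; the checkpoint name is an assumption, not part of
# this module -- any score-based SDE-VE checkpoint such as
# "google/ncsnpp-church-256" should work):
#
#   from diffusers import ScoreSdeVePipeline
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")
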
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value d <= `digit` for which 1/d contains the longest recurring
    cycle in its decimal fraction part (Project Euler problem 26).
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
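

# Worked example (illustrative): among d <= 10, the remainders of 1/7 cycle
# through 1, 3, 2, 6, 4, 5 before repeating -- a 6-digit recurring cycle, the
# longest in that range.
if __name__ == "__main__":
    assert solution(1, 10) == 7
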
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
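

# Usage sketch (illustrative): the processor is normally loaded from a CLIP
# checkpoint such as "openai/clip-vit-base-patch32".
#
#   from PIL import Image
#   from transformers import CLIPImageProcessor
#
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   print(inputs["pixel_values"].shape)  # (1, 3, 224, 224)
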
def is_automorphic_number(number: int) -> bool:
    """
    An automorphic number is one whose square ends in the same digits as the
    number itself, e.g. 5 -> 25, 76 -> 5776.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
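

# Illustrative check: the automorphic numbers below 1000.
if __name__ == "__main__":
    print([n for n in range(1000) if is_automorphic_number(n)])  # [0, 1, 5, 6, 25, 76, 376, 625]
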
def abbr(a: str, b: str) -> bool:
    """
    Determine whether string `a` can be turned into string `b` by capitalizing
    zero or more of its lowercase letters and deleting all remaining lowercase
    letters (the "abbreviation" dynamic-programming problem).
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
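

# Illustrative cases: "daBcd" -> "ABC" works (capitalize a and c, delete the
# lowercase d's), while "dBcd" cannot produce the leading "A".
if __name__ == "__main__":
    assert abbr("daBcd", "ABC") is True
    assert abbr("dBcd", "ABC") is False
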
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """
    Compute the minimum cost to travel on every day in `days`, given the prices
    of a 1-day, 7-day and 30-day pass in `costs`.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
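

# Worked example (illustrative): with passes costing 2/7/15 for 1/7/30 days,
# travelling on days [1, 4, 6, 7, 8, 20] is cheapest as a 7-day pass covering
# days 4-8 plus two single-day passes: 7 + 2 + 2 = 11.
if __name__ == "__main__":
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
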
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
def check_bouncy(n: int) -> bool:
    """
    Returns True if `n` is bouncy: its digits are neither entirely
    non-decreasing nor entirely non-increasing.
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """
    Returns the least number for which the proportion of bouncy numbers
    reaches `percent` (Project Euler problem 112).
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
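

# Illustrative check (the value 538 is given in the Project Euler 112 problem
# statement): the proportion of bouncy numbers first reaches 50% at 538.
if __name__ == "__main__":
    assert solution(50) == 538
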
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Iterates through each branch of the state space tree with DFS, pruning
    branches whose running sum already exceeds `max_sum` or can no longer reach it.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
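
# Another illustrative input: the subsets of [1, 2, 3, 4] summing to 5.
print(*generate_sum_of_subsets_soln([1, 2, 3, 4], 5))  # [1, 4] [2, 3]
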
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
_a : Dict = {'''input_ids''': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_a , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest ( unittest.TestCase ):
    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'
    french_text = 'C\'est trop cool'
    spanish_text = 'Esto es genial'
@classmethod
    def setUpClass( cls ):
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
    def test_lang_code_to_id( self ):
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11 )
    def test_vocab_size( self ):
self.assertEqual(self.tokenizer.vocab_size , 10000 )
    def test_tokenizer_decode_ignores_language_codes( self ):
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids )
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_spanish )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_fr_input_ids( self ):
        self.tokenizer.tgt_lang = '''fr'''
        encoded = self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0] , FR_CODE )
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
    def test_tgt_lang_setter( self ):
        self.tokenizer.tgt_lang = '''fr'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
        self.tokenizer.tgt_lang = '''es'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
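# Illustrative sketch (not part of the original test file): switching `tgt_lang` changes the
# language-code token the tokenizer prepends, roughly along these lines:
#     tokenizer = SpeechaTextTokenizer.from_pretrained('valhalla/s2t_mustc_multilinguial_medium')
#     tokenizer.tgt_lang = 'fr'   # subsequent input_ids start with FR_CODE
#     tokenizer.tgt_lang = 'es'   # subsequent input_ids start with ES_CODE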
| 700
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        backbone_config = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
pass
    def test_model_common_attributes( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_depth_estimation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
    def test_training_gradient_checkpointing( self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
    def test_initialization( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case_ ( self : Dict ) -> Optional[Any]:
pass
@slow
    def test_model_from_pretrained( self ):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_raise_readout_type( self ):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = '''add'''
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest ( unittest.TestCase ):
    def test_inference_depth_estimation( self ):
        image_processor = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
        model = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1E-4 ) )
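    # Hedged usage sketch (not from the original test): to visualize the result, the raw depth map
    # is typically upscaled back to the input resolution with bicubic interpolation, e.g.:
    #     prediction = torch.nn.functional.interpolate(
    #         predicted_depth.unsqueeze(1) , size=image.size[::-1] , mode='''bicubic''' , align_corners=False )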
| 249
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest ( unittest.TestCase ):
    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
        image_processor_map = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            """image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer(self , **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **kwargs )
    def get_rust_tokenizer(self , **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **kwargs )
    def get_image_processor(self , **kwargs ):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , OwlViTImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , OwlViTImageProcessor )
    def test_save_load_pretrained_additional_features(self ):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , OwlViTImageProcessor )
    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str , return_tensors="""np""" )
        encoded_tok = tokenizer(input_str , return_tensors="""np""" )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
    def test_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_with_text_list(self ):
        model_name = """google/owlvit-base-patch32"""
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_text = ["""cat""", """nasa badge"""]
        inputs = processor(text=input_text )
        seq_length = 16
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
        self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_with_nested_text_list(self ):
        model_name = """google/owlvit-base-patch32"""
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = [["""cat""", """nasa badge"""], ["""person"""]]
        inputs = processor(text=input_texts )
        seq_length = 16
        batch_size = len(input_texts )
        num_max_text_queries = max([len(texts ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
        self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_case(self ):
        model_name = """google/owlvit-base-patch32"""
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = ["""cat""", """nasa badge"""]
        inputs = processor(text=input_texts )
        seq_length = 16
        input_ids = inputs["""input_ids"""]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
        self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def test_processor_case2(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input , query_images=query_input )
        self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
| 667
|
'''simple docstring'''
from __future__ import annotations
def allocation_num( number_of_bytes , partitions ):
    """Divide `number_of_bytes` into `partitions` contiguous, 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""" )
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not > number_of_bytes!""" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
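# Worked example (illustrative): allocation_num(100, 3) returns ['1-33', '34-66', '67-100'] --
# integer division gives 100 // 3 = 33 bytes per partition, and the final range absorbs the remainder.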
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667
| 1
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester ( unittest.TestCase ):
    def test_text_streamer_matches_non_streaming( self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=1_0 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=1_0 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text )
    def test_iterator_streamer_matches_non_streaming( self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=1_0 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text = ''''''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text )
    def test_text_streamer_skip_prompt( self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=1_0 , do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_prompt=True )
            model.generate(input_ids , max_new_tokens=1_0 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , new_greedy_text )
    def test_text_streamer_decode_kwargs( self ):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained('''distilgpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5) , device=torch_device ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_special_tokens=True )
            model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1] # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
    def test_iterator_streamer_timeout( self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer , timeout=0.001 )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = ''''''
            for new_text in streamer:
                streamer_text += new_text
| 708
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions( op , got_ver , want_ver , requirement , pkg , hint ) -> None:
    """Compare the installed version against the wanted version using the operator `op`."""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            f""" reinstalling {pkg}.""" )
    if not ops[op](version.parse(got_ver ) , version.parse(want_ver ) ):
        raise ImportError(
            f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def require_version( requirement , hint = None ) -> None:
    """Check that a pip-style `requirement` is satisfied, raising with `hint` appended otherwise."""
    hint = f"""\n{hint}""" if hint is not None else ''''''
    # non-versioned check
    if re.match(r'''^[\w_\-\d]+$''' , requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , requirement )
        if not match:
            raise ValueError(
                '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
                f""" got {requirement}""" )
        pkg , want_full = match[0]
        want_range = want_full.split(''',''' ) # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , w )
            if not match:
                raise ValueError(
                    '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
                    f""" but got {requirement}""" )
            op , want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
    # special case
    if pkg == "python":
        got_ver = '''.'''.join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
def require_version_core( requirement ):
    """`require_version` wrapper which emits a core-specific hint on failure."""
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(requirement , hint )
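# Hedged usage sketch (not part of the original module): typical calls look like
#     require_version("tokenizers==0.9.4")     # exact pin
#     require_version("numpy>=1.17,<2.0")      # comma-separated range
#     require_version("python>=3.7")           # special-cased interpreter check
#     require_version_core("protobuf")         # bare presence check with the core hint
# Each raises ImportError (or PackageNotFoundError) when the requirement is not met.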
| 219
| 0
|
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig ( PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'
    def __init__( self , vocab_size=2_11_28 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
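# Minimal usage sketch (assumed, mirroring other HF configs; not from the source): the defaults
# describe the `nezha-cn-base` architecture, and any field can be overridden by keyword:
#     config = NezhaConfig(hidden_dropout_prob=0.2)
#     config.model_type  # 'nezha'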
| 604
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset( src_lang="ro" , tgt_lang="en" , dataset="wmt16" , save_dir=None ) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('''run pip install datasets''' )
    pair = f'''{src_lang}-{tgt_lang}'''
    print(f'''Converting {dataset}-{pair}''' )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f'''{dataset}-{pair}'''
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f'''Splitting {split} with {ds[split].num_rows} records''' )
        # to save to val.source, val.target like summary datasets
        fn = '''val''' if split == '''validation''' else split
        src_path = save_dir.joinpath(f'''{fn}.source''' )
        tgt_path = save_dir.joinpath(f'''{fn}.target''' )
        src_fp = src_path.open('''w+''' )
        tgt_fp = tgt_path.open('''w+''' )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x['''translation''']
            src_fp.write(ex[src_lang] + '''\n''' )
            tgt_fp.write(ex[tgt_lang] + '''\n''' )
    print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 224
| 0
|
def find_min( arr ):
    """Return the minimum absolute difference between the sums of two partitions of `arr`."""
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
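# Worked example (illustrative): find_min([1, 6, 11, 5]) == 1 -- the total is 23 and the
# closest achievable split is {1, 5, 6} = 12 against {11} = 11.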
| 701
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SpeechaTextProcessor ( ProcessorMixin ):
    feature_extractor_class = """Speech2TextFeatureExtractor"""
    tokenizer_class = """Speech2TextTokenizer"""
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
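# Hedged usage sketch (identifiers assumed for illustration, not from the source):
#     processor = SpeechaTextProcessor(feature_extractor=extractor , tokenizer=tokenizer )
#     batch = processor(audio=waveform , sampling_rate=16_000 , text="transcript" )
# `batch` then holds the feature extractor's outputs plus `labels` taken from the tokenizer's input_ids.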
| 219
| 0
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = """src/diffusers"""
REPO_PATH = """."""
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    """diffusers""",
    os.path.join(DIFFUSERS_PATH, """__init__.py"""),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue( line , indent ) -> bool:
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , line ) is not None
def find_code_in_diffusers( object_name ) -> str:
    """Find and return the source code of `object_name` inside the diffusers package."""
    parts = object_name.split(""".""" )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = """"""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
_re_replace_pattern = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
_re_fill_pattern = re.compile(r"""<FILL\s+[^>]*>""")
def get_indent( code ) -> str:
    lines = code.split("""\n""" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
    return ""
def blackify( code ) -> str:
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("""class Bla:\n""" ) :] if has_indent else result
def is_copy_consistent( filename , overwrite=False ):
    """Check if the code commented as a copy in `filename` matches the original; return the diffs
    or overwrite the content depending on `overwrite`."""
    with open(filename , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f"""^{indent}# End copy""" , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = """""".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = """\n""".join(theoretical_code_lines )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("""with""" , """""" ).split(""",""" )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1 , obj2 , option = pattern.groups()
                theoretical_code = re.sub(obj1 , obj2 , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower() , obj2.lower() , theoretical_code )
                    theoretical_code = re.sub(obj1.upper() , obj2.upper() , theoretical_code )
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
        theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""" )
        with open(filename , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
            f.writelines(lines )
    return diffs
def check_copies( overwrite = False ):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , """**/*.py""" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = """\n""".join(diffs )
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 299
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader ( AbstractDatasetReader ):
    def __init__( self , df: pyspark.sql.DataFrame , split: Optional[NamedSplit] = None , features: Optional[Features] = None , streaming: bool = True , cache_dir: str = None , keep_in_memory: bool = False , working_dir: str = None , load_from_cache_file: bool = True , file_format: str = "arrow" , **kwargs , ) -> None:
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
    def read( self ):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
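# Hedged usage sketch (assumes a live SparkSession named `spark`; not part of the original module):
#     df = spark.read.parquet("path/to/data.parquet")
#     ds = SparkDatasetReader(df , split=NamedSplit("train") , streaming=False ).read()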
| 299
| 1
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = """__DUMMY_TRANSFORMERS_USER__"""
CI_HUB_USER_FULL_NAME = """Dummy User"""
CI_HUB_USER_TOKEN = """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"""
CI_HUB_ENDPOINT = """https://hub-ci.huggingface.co"""
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
CI_HUB_TOKEN_PATH = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch ):
    monkeypatch.setattr(
        'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config(monkeypatch ):
    monkeypatch.setattr('datasets.config.HF_ENDPOINT' , CI_HUB_ENDPOINT )
    monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path(monkeypatch ):
    monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config , ci_hub_token_path ):
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope='session' )
def hf_api( ):
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope='session' )
def hf_token(hf_api: HfApi ):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo(hf_api ):
    def _cleanup_repo(repo_id ):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type='dataset' )
    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo ):
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
    return _temporary_repo
@pytest.fixture(scope='session' )
def hf_private_dataset_repo_txt_data_(hf_api: HfApi , hf_token , text_file ):
    repo_name = F"""repo_txt_data-{int(time.time() * 10E3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type='dataset' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo='data/text_data.txt' , repo_id=repo_id , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi , hf_token , zip_csv_with_dir_path ):
    repo_name = F"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type='dataset' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_csv_with_dir_path ) , path_in_repo='data.zip' , repo_id=repo_id , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi , hf_token , zip_image_path ):
    repo_name = F"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type='dataset' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_image_path ) , path_in_repo='data.zip' , repo_id=repo_id , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    return hf_private_dataset_repo_zipped_img_data_
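# Hedged usage sketch (illustrative, not from the source): inside a test the factory fixtures compose like
#     def test_something(temporary_repo , hf_api , hf_token ):
#         with temporary_repo(f"{CI_HUB_USER}/some-repo" ) as repo_id:
#             hf_api.create_repo(repo_id , token=hf_token , repo_type="dataset" )
# so the repo is deleted again when the `with` block exits.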
| 706
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
            __lowercase = from_adapter.fc1.weight
            __lowercase = from_adapter.fc1.bias
            __lowercase = from_adapter.fc2.weight
            __lowercase = from_adapter.fc2.bias
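            # fc1/fc2 are the bottleneck adapter's down- and up-projections (naming assumed to
            # follow fairseq's adapter implementation).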
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
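    # atol=1e-3 is deliberately loose: the printed max difference is typically ~1e-7, but small
    # fp32 kernel differences across hardware can inflate it.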
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 688
| 0
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 415
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__magic_name__ =logging.getLogger(__name__)
class _A ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] ="token-classification"
def __init__(self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
if type(SCREAMING_SNAKE_CASE_ ) == dict:
UpperCamelCase__ = Namespace(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = import_module('''tasks''' )
try:
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , hparams.task_type )
UpperCamelCase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
UpperCamelCase__ = self.token_classification_task.get_labels(hparams.labels )
UpperCamelCase__ = CrossEntropyLoss().ignore_index
super().__init__(SCREAMING_SNAKE_CASE_ , len(self.labels ) , self.mode )
def _a (self , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
return self.model(**SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
        ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase__ = self(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _a (self ) -> int:
'''simple docstring'''
UpperCamelCase__ = self.hparams
for mode in ["train", "dev", "test"]:
UpperCamelCase__ = self._feature_file(SCREAMING_SNAKE_CASE_ )
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCamelCase__ = self.token_classification_task.read_examples_from_file(args.data_dir , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE_ , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , SCREAMING_SNAKE_CASE_ )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> DataLoader:
'''simple docstring'''
UpperCamelCase__ = self._feature_file(SCREAMING_SNAKE_CASE_ )
logger.info('''Loading features from cached file %s''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.load(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCamelCase__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCamelCase__ = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK: dummy token_type_ids (this workaround will not be needed much longer)
UpperCamelCase__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , batch_size=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
"""Compute validation""" ""
UpperCamelCase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
        ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase__ = self(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ , UpperCamelCase__ = outputs[:2]
UpperCamelCase__ = logits.detach().cpu().numpy()
UpperCamelCase__ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCamelCase__ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCamelCase__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=2 )
UpperCamelCase__ = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCamelCase__ = dict(enumerate(self.labels ) )
UpperCamelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
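        # Align predictions with gold labels, skipping positions whose gold label is the pad
        # label id (padding and continuation sub-word pieces are excluded from the metrics).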
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCamelCase__ = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
'''precision''': precision_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
'''recall''': recall_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
            '''f1''': f1_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
}
UpperCamelCase__ = dict(results.items() )
UpperCamelCase__ = results
return ret, preds_list, out_label_list
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._eval_end(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _a (self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._eval_end(SCREAMING_SNAKE_CASE_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCamelCase__ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _a (SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=SCREAMING_SNAKE_CASE_ , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=SCREAMING_SNAKE_CASE_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=SCREAMING_SNAKE_CASE_ , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__magic_name__ =argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__magic_name__ =NERTransformer.add_model_specific_args(parser, os.getcwd())
__magic_name__ =parser.parse_args()
__magic_name__ =NERTransformer(args)
__magic_name__ =generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__magic_name__ =sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
__magic_name__ =model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 415
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """Build the test inputs, deriving a 0/1 attention mask from the pad token when none is given."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCamelCase__ :
"""simple docstring"""
UpperCamelCase__ = OPTConfig
UpperCamelCase__ = {}
UpperCamelCase__ = 'gelu'
def __init__( self : List[str] ,a__ : Optional[int] ,a__ : Optional[Any]=13 ,a__ : int=7 ,a__ : Dict=True ,a__ : List[str]=False ,a__ : Any=99 ,a__ : Optional[int]=16 ,a__ : Any=2 ,a__ : List[str]=4 ,a__ : Any=4 ,a__ : int="gelu" ,a__ : Union[str, Any]=0.1 ,a__ : Dict=0.1 ,a__ : List[Any]=20 ,a__ : Tuple=2 ,a__ : List[str]=1 ,a__ : str=0 ,a__ : Dict=16 ,a__ : str=16 ,):
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = eos_token_id
a__ = pad_token_id
a__ = bos_token_id
a__ = embed_dim
a__ = word_embed_proj_dim
a__ = False
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
a__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
a__ = tf.concat([input_ids, eos_tensor] ,axis=1 )
a__ = self.config_cls(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,embed_dim=self.embed_dim ,word_embed_proj_dim=self.word_embed_proj_dim ,is_encoder_decoder=a__ ,**self.config_updates ,)
a__ = prepare_opt_inputs_dict(a__ ,a__ )
return config, inputs_dict
def lowerCAmelCase_ ( self : Tuple ,a__ : Union[str, Any] ,a__ : List[Any] ):
a__ = TFOPTModel(config=a__ )
a__ = inputs_dict["input_ids"]
a__ = input_ids[:1, :]
a__ = inputs_dict["attention_mask"][:1, :]
a__ = 1
# first forward pass
a__ = model(a__ ,attention_mask=a__ ,use_cache=a__ )
a__ , a__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        a__ = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.int8 )
# append to next input_ids and
a__ = tf.concat([input_ids, next_tokens] ,axis=-1 )
a__ = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
a__ = model(a__ ,attention_mask=a__ )[0]
a__ = model(a__ ,attention_mask=a__ ,past_key_values=a__ )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
a__ = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
a__ = output_from_no_past[:, -3:, random_slice_idx]
a__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a__ ,a__ ,rtol=1e-3 )
@require_tf
class lowerCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
UpperCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
UpperCamelCase__ = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = 10
def lowerCAmelCase_ ( self : str ):
a__ = TFOPTModelTester(self )
a__ = ConfigTester(self ,config_class=a__ )
def lowerCAmelCase_ ( self : Tuple ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : str ):
a__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a__ )
def lowerCAmelCase_ ( self : int ):
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(a__ : List[Any] ,a__ : List[Any] ):
if hasattr(a__ ,"weight" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(a__ ,"weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
a__ = model_class(config=a__ )
a__ = _get_word_embedding_weight(a__ ,model.get_input_embeddings() )
a__ = _get_word_embedding_weight(a__ ,model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(a__ )
a__ = _get_word_embedding_weight(a__ ,model.get_input_embeddings() )
a__ = _get_word_embedding_weight(a__ ,model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
a__ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] ,a__ )
# check that weights remain the same after resizing
a__ = True
for pa, pa in zip(old_input_embeddings.value() ,new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
a__ = False
self.assertTrue(a__ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] ,a__ )
a__ = True
for pa, pa in zip(old_output_embeddings.value() ,new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
a__ = False
self.assertTrue(a__ )
def _long_tensor(tok_lst ):
    """Wrap a list of token ids in a constant integer tensor."""
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = 99
def lowerCAmelCase_ ( self : List[str] ):
        a__ = tf.ones((4, 1) ,dtype=tf.int32 ) * 2
a__ = tf.concat([ids_tensor((4, 6) ,self.vocab_size - 3 ) + 3, eos_column_vector] ,axis=1 )
a__ = input_ids.shape[0]
a__ = OPTConfig(
vocab_size=self.vocab_size ,hidden_size=24 ,num_hidden_layers=2 ,num_attention_heads=2 ,ffn_dim=32 ,max_position_embeddings=48 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase_ ( self : List[Any] ):
a__ = TFOPTModel.from_pretrained("facebook/opt-350m" )
a__ = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
a__ = tf.not_equal(a__ ,model.config.pad_token_id )
with tf.GradientTape():
a__ = model(input_ids=a__ ,attention_mask=a__ ).last_hidden_state
a__ = (1, 11, 5_12)
self.assertEqual(output.shape ,a__ )
a__ = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] ,a__ ,atol=4e-3 ) )
a__ = tf.function(a__ ,jit_compile=a__ )
a__ = xla_generate(a__ ,a__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] ,a__ ,atol=4e-2 ) )
@require_tf
@slow
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase_ ( self : List[Any] ):
super().setUp()
a__ = "facebook/opt-350m"
def lowerCAmelCase_ ( self : str ):
a__ = TFOPTForCausalLM.from_pretrained(self.path_model )
        a__ = GPT2Tokenizer.from_pretrained(self.path_model )
a__ = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
a__ = tokenizer(a__ ,return_tensors="tf" ,padding=a__ ,add_special_tokens=a__ )
a__ = tf.math.reduce_mean(model(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
a__ = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(a__ ,a__ ,atol=1e-4 ) )
a__ = tf.function(a__ ,jit_compile=a__ )
a__ = tf.math.reduce_mean(xla_generate(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
self.assertTrue(np.allclose(a__ ,a__ ,atol=1e-4 ) )
@require_tf
@slow
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def lowerCAmelCase_ ( self : Dict ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = "facebook/opt-125m"
a__ = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
a__ = []
        a__ = GPT2Tokenizer.from_pretrained(a__ )
a__ = TFOPTForCausalLM.from_pretrained(a__ )
for prompt in self.prompts:
a__ = tokenizer(a__ ,return_tensors="tf" ).input_ids
a__ = model.generate(a__ ,max_length=10 )
a__ = tokenizer.batch_decode(a__ ,skip_special_tokens=a__ )
predicted_outputs += generated_string
self.assertListEqual(a__ ,a__ )
def lowerCAmelCase_ ( self : Any ):
a__ = "facebook/opt-350m"
        a__ = GPT2Tokenizer.from_pretrained(a__ )
a__ = TFOPTForCausalLM.from_pretrained(a__ )
a__ = "left"
# use different length sentences to test batching
a__ = [
"Hello, my dog is a little",
"Today, I",
]
a__ = tokenizer(a__ ,return_tensors="tf" ,padding=a__ )
a__ = inputs["input_ids"]
a__ = model.generate(input_ids=a__ ,attention_mask=inputs["attention_mask"] )
a__ = tokenizer(sentences[0] ,return_tensors="tf" ).input_ids
a__ = model.generate(input_ids=a__ )
a__ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1] ,tf.int64 ) )
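        # `num_paddings` counts the pad positions added to the shorter sentence; generating it
        # alone with `max_length` reduced by that amount should reproduce the left-padded
        # batched output token-for-token.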
a__ = tokenizer(sentences[1] ,return_tensors="tf" ).input_ids
a__ = model.generate(input_ids=a__ ,max_length=model.config.max_length - num_paddings )
a__ = tokenizer.batch_decode(a__ ,skip_special_tokens=a__ )
a__ = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=a__ )
a__ = tokenizer.decode(output_padded[0] ,skip_special_tokens=a__ )
a__ = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(a__ ,a__ )
self.assertListEqual(a__ ,[non_padded_sentence, padded_sentence] )
def lowerCAmelCase_ ( self : List[Any] ):
a__ = "facebook/opt-350m"
a__ = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
a__ = []
        a__ = GPT2Tokenizer.from_pretrained(a__ )
a__ = TFOPTForCausalLM.from_pretrained(a__ )
for prompt in self.prompts:
a__ = tokenizer(a__ ,return_tensors="tf" ).input_ids
a__ = model.generate(a__ ,max_length=10 )
a__ = tokenizer.batch_decode(a__ ,skip_special_tokens=a__ )
predicted_outputs += generated_string
self.assertListEqual(a__ ,a__ )
| 701
|
'''simple docstring'''
def split(string: str, separator: str = " ") -> list:
    """Split `string` at each occurrence of `separator`, without using str.split."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
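# Illustrative behavior (not an executable doctest):
#   split("apple#banana#cherry#orange", separator="#") -> ['apple', 'banana', 'cherry', 'orange']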
if __name__ == "__main__":
from doctest import testmod
testmod()
| 394
| 0
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 317
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 317
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = "swinv2"
UpperCAmelCase__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , __snake_case : Dict=2_2_4 , __snake_case : List[Any]=4 , __snake_case : List[str]=3 , __snake_case : List[Any]=9_6 , __snake_case : int=[2, 2, 6, 2] , __snake_case : List[str]=[3, 6, 1_2, 2_4] , __snake_case : Union[str, Any]=7 , __snake_case : Tuple=4.0 , __snake_case : List[Any]=True , __snake_case : Dict=0.0 , __snake_case : Tuple=0.0 , __snake_case : str=0.1 , __snake_case : List[str]="gelu" , __snake_case : List[Any]=False , __snake_case : Optional[Any]=0.02 , __snake_case : Tuple=1E-5 , __snake_case : Union[str, Any]=3_2 , **__snake_case : Optional[int] , ) -> List[str]:
super().__init__(**__snake_case )
__magic_name__: int = image_size
__magic_name__: Optional[Any] = patch_size
__magic_name__: List[str] = num_channels
__magic_name__: int = embed_dim
__magic_name__: int = depths
__magic_name__: int = len(__snake_case )
__magic_name__: Union[str, Any] = num_heads
__magic_name__: str = window_size
__magic_name__: List[Any] = mlp_ratio
__magic_name__: Any = qkv_bias
__magic_name__: Dict = hidden_dropout_prob
__magic_name__: Dict = attention_probs_dropout_prob
__magic_name__: Any = drop_path_rate
__magic_name__: List[Any] = hidden_act
__magic_name__: Optional[Any] = use_absolute_embeddings
__magic_name__: List[Any] = layer_norm_eps
__magic_name__: str = initializer_range
__magic_name__: Optional[int] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__magic_name__: Optional[int] = int(embed_dim * 2 ** (len(__snake_case ) - 1) )
__magic_name__: Optional[int] = (0, 0, 0, 0)
| 703
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __A :
def __init__( self : Optional[Any] , __snake_case : Tuple , __snake_case : Any=1_3 , __snake_case : int=7 , __snake_case : Optional[int]=True , __snake_case : Tuple=True , __snake_case : Optional[int]=True , __snake_case : Any=9_9 , __snake_case : Optional[int]=3_2 , __snake_case : Any=5 , __snake_case : Optional[Any]=4 , __snake_case : Optional[Any]=3_7 , __snake_case : Dict="gelu" , __snake_case : Any=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Optional[Any]=5_1_2 , __snake_case : Dict=1_6 , __snake_case : Optional[int]=2 , __snake_case : Dict=0.02 , __snake_case : Tuple=3 , __snake_case : str=4 , __snake_case : List[str]=None , ) -> Dict:
__magic_name__: Tuple = parent
__magic_name__: Union[str, Any] = batch_size
__magic_name__: List[str] = seq_length
__magic_name__: Optional[int] = is_training
__magic_name__: Union[str, Any] = use_token_type_ids
__magic_name__: Dict = use_labels
__magic_name__: Optional[Any] = vocab_size
__magic_name__: Optional[Any] = hidden_size
__magic_name__: List[Any] = num_hidden_layers
__magic_name__: Tuple = num_attention_heads
__magic_name__: Optional[Any] = intermediate_size
__magic_name__: Dict = hidden_act
__magic_name__: Tuple = hidden_dropout_prob
__magic_name__: str = attention_probs_dropout_prob
__magic_name__: List[Any] = max_position_embeddings
__magic_name__: Any = type_vocab_size
__magic_name__: int = type_sequence_label_size
__magic_name__: int = initializer_range
__magic_name__: List[str] = num_labels
__magic_name__: Union[str, Any] = num_choices
__magic_name__: Any = scope
__magic_name__: Tuple = self.vocab_size - 1
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
__magic_name__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__: Any = None
if self.use_token_type_ids:
__magic_name__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__: List[str] = None
__magic_name__: str = None
__magic_name__: Dict = None
if self.use_labels:
__magic_name__: int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__: Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__: str = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__: Any = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__: int = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : str , __snake_case : List[Any] , *__snake_case : str ) -> Tuple:
__magic_name__: Optional[Any] = OpenAIGPTModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: str = model(__snake_case , token_type_ids=__snake_case , head_mask=__snake_case )
__magic_name__: str = model(__snake_case , token_type_ids=__snake_case )
__magic_name__: List[str] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Tuple , __snake_case : Tuple , __snake_case : int , __snake_case : Tuple , __snake_case : Any , *__snake_case : Any ) -> int:
__magic_name__: List[str] = OpenAIGPTLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: Dict = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Any , *__snake_case : Optional[Any] ) -> Tuple:
__magic_name__: Optional[int] = OpenAIGPTDoubleHeadsModel(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: Dict = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : List[Any] , *__snake_case : Dict ) -> Any:
__magic_name__: Tuple = self.num_labels
__magic_name__: Optional[Any] = OpenAIGPTForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__: Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__: Dict = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCAmelCase__ = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : List[Any] , __snake_case : Any , __snake_case : str , __snake_case : Optional[int] , __snake_case : int ) -> str:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : List[Any]=False ) -> Optional[int]:
__magic_name__: Tuple = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__: Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case , )
__magic_name__: int = inputs_dict["""labels"""]
__magic_name__: int = inputs_dict["""labels"""]
__magic_name__: List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__snake_case , )
__magic_name__: Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
__magic_name__: Optional[Any] = OpenAIGPTModelTester(self )
__magic_name__: Optional[int] = ConfigTester(self , config_class=__snake_case , n_embd=3_7 )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : int ) -> str:
__magic_name__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__snake_case )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__snake_case )
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
__magic_name__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__snake_case )
@slow
def lowerCamelCase__ ( self : int ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__: str = OpenAIGPTModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class __A ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__: Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(__snake_case )
__magic_name__: List[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=__snake_case ) # the president is
__magic_name__: Any = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__: Dict = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() , __snake_case )
| 213
| 0
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCamelCase = logging.getLogger(__name__)
_lowerCamelCase = """Hello world! cécé herlolip"""
_lowerCamelCase = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = BertAbsConfig(
temp_dir="." , finetune_bert=_SCREAMING_SNAKE_CASE , large=_SCREAMING_SNAKE_CASE , share_emb=_SCREAMING_SNAKE_CASE , use_bert_emb=_SCREAMING_SNAKE_CASE , encoder="bert" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
    UpperCAmelCase_ : Tuple = torch.load(_SCREAMING_SNAKE_CASE , lambda storage , loc: storage )
UpperCAmelCase_ : int = AbsSummarizer(_SCREAMING_SNAKE_CASE , torch.device("cpu" ) , _SCREAMING_SNAKE_CASE )
original.eval()
UpperCAmelCase_ : Union[str, Any] = BertAbsSummarizer(_SCREAMING_SNAKE_CASE , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
UpperCAmelCase_ : Tuple = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(_SCREAMING_SNAKE_CASE )) )
UpperCAmelCase_ : Any = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
UpperCAmelCase_ : Optional[Any] = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(_SCREAMING_SNAKE_CASE )) )
UpperCAmelCase_ : str = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase_ : Tuple = encoder_input_ids
UpperCAmelCase_ : Optional[int] = decoder_input_ids
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Tuple = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase_ : List[str] = original(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase_ : Any = original.generator(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = new_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase_ : List[str] = new_model.generator(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : str = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : int = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
_lowerCamelCase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 71
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
    def __init__( self , unet: UNet2DModel , scheduler: ScoreSdeVeScheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self : List[str] ,lowercase_ : int = 1 ,lowercase_ : int = 2_0_0_0 ,lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowercase_ : Optional[str] = "pil" ,lowercase_ : bool = True ,**lowercase_ : Dict ,):
lowerCAmelCase__ : str = self.unet.config.sample_size
lowerCAmelCase__ : int = (batch_size, 3, img_size, img_size)
lowerCAmelCase__ : List[Any] = self.unet
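        # SDE-VE starts from pure Gaussian noise scaled by the scheduler's largest sigma
        # (init_noise_sigma) and then alternates corrector and predictor steps below.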
lowerCAmelCase__ : Tuple = randn_tensor(lowercase_ ,generator=lowercase_ ) * self.scheduler.init_noise_sigma
lowerCAmelCase__ : List[Any] = sample.to(self.device )
self.scheduler.set_timesteps(lowercase_ )
self.scheduler.set_sigmas(lowercase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCAmelCase__ : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowerCAmelCase__ : str = self.unet(lowercase_ ,lowercase_ ).sample
lowerCAmelCase__ : List[str] = self.scheduler.step_correct(lowercase_ ,lowercase_ ,generator=lowercase_ ).prev_sample
# prediction step
lowerCAmelCase__ : Dict = model(lowercase_ ,lowercase_ ).sample
lowerCAmelCase__ : Optional[int] = self.scheduler.step_pred(lowercase_ ,lowercase_ ,lowercase_ ,generator=lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = output.prev_sample, output.prev_sample_mean
lowerCAmelCase__ : List[str] = sample_mean.clamp(0 ,1 )
lowerCAmelCase__ : Union[str, Any] = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowerCAmelCase__ : Union[str, Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowercase_ )
| 450
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
A : Any = logging.getLogger(__name__)
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
A__ = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"})
A__ = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
A__ = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
A__ = field(
default=UpperCamelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
A__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
A__ = field(
default=UpperCamelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(default=UpperCamelCase__ , metadata={"help": "The input training data file (a text file)."})
A__ = field(
default=UpperCamelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
A__ = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"})
A__ = field(
default=UpperCamelCase__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
A__ = field(
default=UpperCamelCase__ , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ = field(
default=UpperCamelCase__ , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
A__ = field(
default=UpperCamelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A__ = field(
default=UpperCamelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.train_file is not None:
            extension = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
            extension = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__( self , features ):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["input_ids"] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels , dtype=torch.int64 )
        return batch
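
# Illustrative sketch (not part of the original script): the collator above
# flattens batch_size * num_choices feature dicts so the tokenizer can pad
# them together, then views the padded tensors back to
# (batch_size, num_choices, seq_len). The toy numbers below are assumptions.
def _demo_collator_unflatten():
    import torch

    batch_size, num_choices, seq_len = 2, 4, 5
    flat = torch.arange(batch_size * num_choices * seq_len)
    unflat = flat.view(batch_size, num_choices, -1)  # same view the collator uses
    assert unflat.shape == (batch_size, num_choices, seq_len)
    return unflat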
def lowercase_ ( ):
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split("." )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [F"ending{i}" for i in range(4 )]
    context_name = "sent1"
    question_header_name = "sent2"
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
            max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers )
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="max_length" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
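    # Example of the un-flatten above (illustrative): with 4 endings per
    # question, entry k of each output list gathers sequences 4*k .. 4*k + 3,
    # restoring one row per original question.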
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
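# Illustrative launch sketch (paths are placeholders; the flags are standard
# HfArgumentParser/TrainingArguments options):
#   python run_swag.py --model_name_or_path bert-base-uncased \
#       --do_train --do_eval --output_dir /tmp/swag_out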
| 716
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig ):
    """Configuration for X-MOD models (XLM-R with language-specific adapters)."""

    model_type = "xmod"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
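
# Minimal usage sketch (assumes a transformers release that ships XmodConfig;
# not part of the original file): configs round-trip through plain dicts.
def _demo_xmod_config_roundtrip():
    from transformers import XmodConfig

    config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
    clone = XmodConfig.from_dict(config.to_dict())
    assert clone.default_language == "en_XX"
    return clone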
class XmodOnnxConfig(OnnxConfig ):
    """ONNX export configuration for X-MOD."""
@property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 5
| 0
|
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("""google.colab""")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index: int, end: str = ""):
        if sys.platform != "win32":
            # Writes the selected choice in green
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index, with the arrow on the active row."""
        if index == self.position:
            forceWrite(f' {self.arrow_char} ')
            self.write_choice(index)
        else:
            forceWrite(f'    {self.choices[index]}')
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Moves the cursor up or down and redraws the affected rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jumps straight to a row when a number key is pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Starts the menu loop and returns the index of the selected choice."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
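
# Illustrative usage sketch (not part of the original module; the choices
# below are assumptions): the menu is driven from an interactive terminal.
def _demo_menu():
    menu = BulletMenu("Which compute environment are you running in?", ["This machine", "AWS (Amazon SageMaker)"])
    return menu.run(default_choice=0)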
| 331
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = '''tokenizer_file'''
    special_tokens_map = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
    def setUp(self ):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encodings_from_sample_data(self ):
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES )["input_ids"]
        self.assertListEqual(computed_tokens , TARGET_TOKENS )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(decoded_tokens , INPUT_SENTENCES )
    def test_padding(self , max_length=6 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding" )

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
    def test_encodings_from_xnli_dataset(self ):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli" , "all_languages" , split="test" , streaming=True )
        sample_data = next(iter(ds ) )["premise"]  # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
    def test_pretrained_model_lists(self ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
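
# Illustrative sketch (assumes network access to the bigscience/tokenizer
# repo; not part of the original tests): padded batch encoding in one call.
def _demo_bloom_padding():
    from transformers import BloomTokenizerFast

    tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
    batch = tok(["short", "a noticeably longer input"], padding=True, return_tensors="pt")
    assert batch["input_ids"].shape[0] == 2
    return batch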
| 331
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
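# Illustrative note (the module path below is an assumption about where this
# file lives): the _LazyModule defers the heavy import, so
#   from transformers.models.tapex import TapexTokenizer
# only loads tokenization_tapex at first attribute access.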
| 508
|
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self , data: Any ):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self ):
        self.head = None

    def print_list(self ):
        temp = self.head
        while temp is not None:
            print(temp.data , end=' ' )
            temp = temp.next
        print()

    def push(self , new_data: Any ):
        """Add a node at the head of the list."""
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self , node_data_1 , node_data_2 ):
        """Swap the payloads of the first nodes holding the two given values."""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data , node_2.data = node_2.data , node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 508
| 1
|
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place one queen per row, backtracking whenever a collision is found."""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen in the same column, because if there
        # is it means there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )


def n_queens_solution(n: int) -> None:
    """Print every placement of n non-attacking queens on an n x n board."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('''''')

    print(len(boards), '''solutions were found.''')
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 0
|
def reverse_long_words(sentence: str ) -> str:
    """Reverse every word longer than 4 characters in the sentence."""
    return " ".join(
        word[::-1] if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 410
| 0
|
def bubble_sort(list_data: list , length: int = 0 ) -> list:
    """Recursive bubble sort: each full pass bubbles the largest remaining element to the end."""
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
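
# Example (illustrative): bubble_sort([4, 1, 3, 2]) returns [1, 2, 3, 4]; the
# recursion shortens the scanned prefix by one element per fully swept pass.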
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
        example_video_filepath = hf_hub_download(
            repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        video_classifier = VideoClassificationPipeline(model=model , image_processor=processor , top_k=2 )
        examples = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples
    def run_pipeline_test(self , video_classifier , examples ):
        for example in examples:
            outputs = video_classifier(example )
            self.assertEqual(
                outputs , [
                    {'''score''': ANY(float ), '''label''': ANY(str )},
                    {'''score''': ANY(float ), '''label''': ANY(str )},
                ] , )
@require_torch
    def test_small_model_pt(self ):
        model_id = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
        video_classifier = pipeline(
            '''video-classification''' , model=model_id , feature_extractor=small_feature_extractor , frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        outputs = video_classifier(video_file_path , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
            ] , )
@require_tf
    def test_small_model_tf(self ):
pass
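
# Illustrative sketch (assumes decord and hub access; the model name comes
# from the test above, the video path is an assumption):
def _demo_video_pipeline():
    from transformers import pipeline

    clf = pipeline("video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification")
    return clf("archery.mp4", top_k=2)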
| 531
| 0
|
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor ):
    r"""
    Constructs an image processor with the resize / center-crop / rescale / normalize pipeline.
    """

    model_input_names = ["pixel_values"]

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = IMAGENET_DEFAULT_MEAN , image_std = IMAGENET_DEFAULT_STD , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size_dict = get_size_dict(size , default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["""shortest_edge"""] )
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
            size_dict = {"""height""": output_size[0], """width""": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
        return resize(
            image , size=(size_dict["""height"""], size_dict["""width"""]) , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
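
# Worked example (illustrative): with size={"shortest_edge": 224} the resize
# step above scales the target edge by 256/224 before center-cropping,
# mirroring the classic resize-to-256-then-crop-to-224 ImageNet recipe.
def _demo_resize_math():
    shortest_edge = 224
    scaled = int((256 / 224) * shortest_edge)
    assert scaled == 256
    return scaled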
| 433
|
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric ):
    """Computes the exact-match rate between predictions and references."""
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , "" , x ) for x in predictions] )
                references = np.array([re.sub(s , "" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )

        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )

        if ignore_numbers:
            repl_table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )

        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
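
# Worked example (illustrative, mirrors the docstring): with predictions
# ["cat?", "theater"] and references ["the cat", "theater"], only the second
# pair matches exactly, so compute() returns {"exact_match": 50.0}.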
| 433
| 1
|
'''simple docstring'''
import re
def complement_dna(dna: str ) -> str:
    """Return the complementary strand, mapping A<->T and C<->G."""
    if len(re.findall('''[ATCG]''' , dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"""comet"""}
_has_fairseq = importlib.util.find_spec("""fairseq""") is not None

UNSUPPORTED_ON_WINDOWS = {"""code_eval"""}
_on_windows = os.name == """nt"""

REQUIRE_TRANSFORMERS = {"""bertscore""", """frugalscore""", """perplexity"""}
_has_transformers = importlib.util.find_spec("""transformers""") is not None
def skip_if_metric_requires_fairseq(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('''"test requires Fairseq"''' )
        else:
            test_case(self , metric_name )

    return wrapper
def skip_if_metric_requires_transformers(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('''"test requires transformers"''' )
        else:
            test_case(self , metric_name )

    return wrapper
def skip_on_windows(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('''"test not supported on Windows"''' )
        else:
            test_case(self , metric_name )

    return wrapper
def get_local_metric_names() -> list:
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows )
@local
class LocalMetricTest(parameterized.TestCase ):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
    def test_load_metric(self , metric_name ):
snake_case__ = '''[...]'''
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , metric_name ) ).module_path )
        metric = datasets.load.import_main_class(metric_module.__name__ , dataset=False )
        # check parameters
        parameters = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) )  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
@slow
    def test_load_real_metric(self , metric_name ):
snake_case__ = '''[...]'''
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , metric_name ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
@contextmanager
    def patch_intensive_calls(self , metric_name , module_name ):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name ):
                yield
        else:
            yield
@contextmanager
    def use_local_metrics(self ):
        def load_local_metric(metric_name , *args , **kwargs ):
            return load_metric(os.path.join('''metrics''' , metric_name ) , *args , **kwargs )

        with patch('''datasets.load_metric''' ) as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher(cls , metric_name ):
        def wrapper(patcher ):
            patcher = contextmanager(patcher )
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def patch_bleurt(module_name ):
    import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags
    class MockedPredictor(Predictor ):
        def predict(self , input_dict ):
            assert len(input_dict['''input_ids'''] ) == 2
            return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def patch_bertscore(module_name ):
    import torch

    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('''bert_score.scorer.get_model''' ), patch(
'''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def patch_comet(module_name ):
    def load_from_checkpoint(model_path ):
        class Model:
            def predict(self , data , *args , **kwargs ):
                assert len(data ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )

        return Model()

    # mock download_model and load_from_checkpoint, which are supposed to download a model
    with patch('''comet.download_model''' ) as mock_download_model:
        mock_download_model.return_value = None
        with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
yield
def test_seqeval_raises_when_incorrect_scheme() -> None:
    metric = load_metric(os.path.join('''metrics''' , '''seqeval''' ) )
    wrong_scheme = '''ERROR'''
    error_message = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
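
# Illustrative sketch (hypothetical metric name): registering a patcher for
# another network-heavy metric follows the same decorator pattern as the
# bleurt/bertscore/comet patchers above.
#
# @LocalMetricTest.register_intensive_calls_patcher("my_metric")
# def patch_my_metric(module_name):
#     with patch("my_metric.heavy_forward_pass") as mock_forward:
#         mock_forward.return_value = 1.0
#         yield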
| 208
| 0
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __a( unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
    def test_1b_lyrics_tokenizer(self ):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        tokens = tokenizer(**self.metas )['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self ):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
        tokens = tokenizer(**self.metas )['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
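
# Illustrative sketch (assumes hub access; not part of the original tests):
# the tokenizer consumes the three metadata fields and returns one token
# sequence per Jukebox prior, as exercised above.
def _demo_jukebox_tokens():
    from transformers import JukeboxTokenizer

    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
    return len(tokens)  # one tensor per prior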
| 30
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin ):
    feature_extractor_class = """SpeechT5FeatureExtractor"""
    tokenizer_class = """SpeechT5Tokenizer"""
def __init__( self , _A , _A ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_A , _A )
def __call__( self , *_A , **_A ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Any = kwargs.pop("""audio""" , _A )
_UpperCAmelCase : Tuple = kwargs.pop("""text""" , _A )
_UpperCAmelCase : Any = kwargs.pop("""text_target""" , _A )
_UpperCAmelCase : Optional[Any] = kwargs.pop("""audio_target""" , _A )
_UpperCAmelCase : Any = kwargs.pop("""sampling_rate""" , _A )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
_UpperCAmelCase : Optional[Any] = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
elif text is not None:
_UpperCAmelCase : List[str] = self.tokenizer(_A , **_A )
else:
_UpperCAmelCase : Optional[int] = None
if audio_target is not None:
_UpperCAmelCase : List[Any] = self.feature_extractor(audio_target=_A , *_A , sampling_rate=_A , **_A )
_UpperCAmelCase : Union[str, Any] = targets["""input_values"""]
elif text_target is not None:
_UpperCAmelCase : Optional[int] = self.tokenizer(_A , **_A )
_UpperCAmelCase : Union[str, Any] = targets["""input_ids"""]
else:
_UpperCAmelCase : List[Any] = None
if inputs is None:
return targets
if targets is not None:
_UpperCAmelCase : List[str] = labels
_UpperCAmelCase : List[str] = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
_UpperCAmelCase : Optional[int] = decoder_attention_mask
return inputs
    def pad( self , *args , **kwargs ):
        '''simple docstring'''
        input_values = kwargs.pop("""input_values""" , None )
        input_ids = kwargs.pop("""input_ids""" , None )
        labels = kwargs.pop("""labels""" , None )
        if input_values is not None and input_ids is not None:
            raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                """You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets["""input_ids"""]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["""input_values"""]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["""labels"""] = labels
            decoder_attention_mask = targets.get("""attention_mask""" )
            if decoder_attention_mask is not None:
                inputs["""decoder_attention_mask"""] = decoder_attention_mask
        return inputs
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
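# --- Illustrative usage sketch (added; not part of the original module) ---
# Assumes a SpeechT5 checkpoint such as "microsoft/speecht5_tts" is available
# on the Hub; `text` is routed to the tokenizer and `audio_target` to the
# feature extractor, as implemented above:
#   processor = _UpperCAmelCase.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")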
| 238
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : Optional[int] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _UpperCAmelCase ( PretrainedConfig ):
    model_type = "realm"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , retriever_proj_size=1_28 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=30_72 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=2_56 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=3_20 , num_block_records=13_35_37_18 , searcher_beam_size=50_00 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
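# Illustrative sketch (added): each keyword above overrides one field of the
# default base-REALM configuration, e.g.
#   config = _UpperCAmelCase(num_candidates=4, reader_beam_size=10)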
| 717
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Tuple = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _UpperCAmelCase ( PretrainedConfig ):
    model_type = "cvt"
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 1_92, 3_84] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
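# Illustrative note (added): every list-valued argument above carries one
# entry per CvT stage, so a deeper three-stage model is configured as e.g.
#   config = _UpperCAmelCase(depth=[1, 4, 16], embed_dim=[64, 1_92, 3_84])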
| 397
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Union[str, Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__A : List[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
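# Note (added): the 256 in the slices above is the hidden size of the
# released Conditional DETR checkpoints; the fused in_proj matrix stacks the
# query, key and value projections as three consecutive 256-row blocks.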
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ):
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    logger.info(F"Converting model {model_name}..." )
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                val = state_dict.pop(key )
                state_dict["conditional_detr.model" + key[len("conditional_detr" ) :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__A : int = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
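# Example invocation (illustrative; the script and output names are placeholders):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50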
| 16
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__A : int = logging.get_logger(__name__)
__A : List[str] = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
__A : Optional[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
__A : Tuple = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
__A : Dict = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
__A : Any = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
__A : Optional[int] = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
__A : Union[str, Any] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
__A : Optional[int] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
__A : str = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
__A : Dict = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
__A : Dict = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
__A : Any = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
__A : Optional[int] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
__A : List[str] = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
__A : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__A : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__A : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__A : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__A : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__A : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__A : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__A : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__A : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__A : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__A : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__A : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 16
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Dict = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
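# Note (added): with the _LazyModule pattern above, importing this package is
# cheap; a heavy submodule such as `modeling_electra` is only imported the
# first time one of its attributes (e.g. `ElectraModel`) is accessed.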
| 223
|
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
a__ : int = datasets.logging.get_logger(__name__)
a__ : Union[str, Any] = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
a__ : Optional[int] = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
a__ : str = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare( self , dl_manager ):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 223
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''onnx''']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''onnx'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''onnx'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''onnx'''] )
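# Illustrative note (added): instantiating this placeholder without the onnx
# backend installed raises an ImportError from requires_backends, e.g.
#   _ = lowerCamelCase()  # -> ImportError asking to install the onnx library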
| 390
|
'''simple docstring'''
def __lowerCamelCase ( equation1: list[int] , equation2: list[int] ):
    '''simple docstring'''
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('''Please enter a valid equation.''' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''' )

    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''' )
        else:
            raise ValueError('''No solution. (Inconsistent system)''' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
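# Worked example (added): for the system 2x + 3y = 6 and x + 2y = 3,
#   determinant   = 2 * 2 - 1 * 3 = 1
#   determinant_x = 6 * 2 - 3 * 3 = 3
#   determinant_y = 2 * 3 - 1 * 6 = 0
# so __lowerCamelCase([2, 3, 6], [1, 2, 3]) returns (3.0, 0.0).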
| 390
| 1
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCamelCase = 16
__lowerCamelCase = 32
def bamb ( x ) -> int:
    return int(x / 2**20 )
class TorchTracemalloc :
    def __enter__( self ):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__( self , *exc ):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders ( accelerator , batch_size = 16 , model_name = "bert-base-cased" , n_train = 320 , n_val = 160 , ):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        '''glue''' , '''mrpc''' , split={'''train''': f'''train[:{n_train}]''', '''validation''': f'''validation[:{n_val}]'''} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function ( config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
        accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
        accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
        accelerator.print(
            '''Total Peak Memory consumed during the train (max): {}'''.format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f'''epoch-{epoch}'''] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
            json.dump(train_total_peak_memory , f )
def main ( ):
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=__UpperCamelCase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__UpperCamelCase , )
parser.add_argument(
'''--output_dir''' , type=__UpperCamelCase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--peak_memory_upper_bound''' , type=__UpperCamelCase , default=__UpperCamelCase , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
parser.add_argument(
'''--n_train''' , type=__UpperCamelCase , default=320 , help='''Number of training examples to use.''' , )
parser.add_argument(
'''--n_val''' , type=__UpperCamelCase , default=160 , help='''Number of validation examples to use.''' , )
parser.add_argument(
'''--num_epochs''' , type=__UpperCamelCase , default=1 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
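# Example invocation (illustrative; the file name is a placeholder):
#   accelerate launch peak_memory_tracking.py --model_name_or_path bert-base-cased \
#       --num_epochs 1 --n_train 320 --n_val 160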
| 190
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir ( files , tmp_path_factory ):
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
            f.write('''---\ndataset_info:\n  dataset_size: 42\n---''' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
            f.write('''''' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
            f.write('''{"default": {"dataset_size": 42}}''' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload ( dataset_info , tmp_path ):
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , '''dataset_info.json''' ) )
def test_dataset_info_to_yaml_dict ( ):
    dataset_info = DatasetInfo(
        description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty ( ):
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload ( dataset_infos_dict , tmp_path ):
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , '''README.md''' ) )
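# Note (added): these tests touch only pytest's tmp_path fixtures, so they can
# be run standalone, e.g. `pytest -q <this_test_file>.py` (the file name
# depends on where the module is saved).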
| 190
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowercase_ : Tuple = logging.get_logger(__name__)
lowercase_ : List[Any] = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __UpperCamelCase (PretrainedConfig ):
    model_type = '''dpt'''

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                backbone_config = {
                    """global_padding""": """same""",
                    """layer_type""": """bottleneck""",
                    """depths""": [3, 4, 9],
                    """out_features""": ["""stage1""", """stage2""", """stage3"""],
                    """embedding_dynamic_padding""": True,
                }
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
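# Illustrative sketch (added): passing is_hybrid=True without a
# backbone_config makes the constructor fall back to the default BiT backbone
# dictionary defined above, e.g.
#   config = __UpperCamelCase(is_hybrid=True)
#   assert config.backbone_config.layer_type == "bottleneck"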
| 588
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
lowercase_ : Union[str, Any] = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
lowercase_ : Dict = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict( checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location="""cpu""" )
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    new_d = OrderedDict()
    new_d["""visual_bert.embeddings.position_ids"""] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
            new_d["""cls.predictions.decoder.bias"""] = new_d["""cls.predictions.bias"""]
    return new_d
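# Hedged illustration (editor addition): the renaming loop above is a sequential
# prefix substitution; a standalone demo with toy keys:
_demo_pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
_demo_sd = {"bert.bert.encoder.weight": 1, "bert.cls.bias": 2}
_renamed = {}
for _k, _v in _demo_sd.items():
    _new_k = _k
    for _old, _new in _demo_pairs:
        _new_k = _new_k.replace(_old, _new)
    _renamed[_new_k] = _v
assert _renamed == {"visual_bert.encoder.weight": 1, "cls.bias": 2}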
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    assert (
        checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    if "pre" in checkpoint_path:
        model_type = """pretraining"""
        if "vcr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 1024}
        else:
            raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 512}
            model_type = """multichoice"""
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2048}
            model_type = """vqa_advanced"""
        elif "vqa" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2048, """num_labels""": 3129}
            model_type = """vqa"""
        elif "nlvr" in checkpoint_path:
            config_params = {
                """visual_embedding_dim""": 1024,
                """num_labels""": 2,
            }
            model_type = """nlvr"""
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
lowercase_ : Optional[Any] = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 588
| 1
|
'''simple docstring'''
import numpy as np
def _a ( vector , alpha ) -> np.ndarray:
    """simple docstring"""
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
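# Hedged usage sketch (editor addition): for inputs <= 0 the expression follows
# alpha * (exp(x) - 1), so outputs are bounded below by -alpha; positive inputs pass through.
assert np.allclose(_a(np.array([-1.0, 0.0, 2.0]), 1.0), [np.exp(-1.0) - 1.0, 0.0, 2.0])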
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718
|
def bubble_sort ( list_data , length = 0 ) -> list:
    """simple docstring"""
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
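# Hedged usage sketch (editor addition): each recursive call guarantees the largest
# remaining element has bubbled to index `length - 1`, so the sorted suffix grows by one.
assert bubble_sort([5, 1, 4, 2]) == [1, 2, 4, 5]
assert bubble_sort([]) == []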
if __name__ == "__main__":
import doctest
doctest.testmod()
| 567
| 0
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
(r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
(r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
(r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
def convert_command_factory( args: Namespace ):
    '''simple docstring'''
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand ( BaseDatasetsCLICommand ):
    '''simple docstring'''
    @staticmethod
    def register_subcommand ( parser ):
        '''simple docstring'''
        train_parser = parser.add_parser(
            """convert""" ,help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" ,)
        train_parser.add_argument(
            """--tfds_path""" ,type=str ,required=True ,help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" ,)
        train_parser.add_argument(
            """--datasets_directory""" ,type=str ,required=True ,help="""Path to the HuggingFace Datasets folder.""" )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self ,tfds_path ,datasets_directory ,*args ):
        '''simple docstring'''
        self._logger = get_logger("""datasets-cli/converting""" )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run ( self ):
        '''simple docstring'''
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F"""Looking at file {f_name}""" )
            input_file = os.path.join(abs_tfds_path ,f_name )
            output_file = os.path.join(abs_datasets_path ,f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("""Skipping file""" )
                continue
            with open(input_file ,encoding="""utf-8""" ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = """import datasets\n"""
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = """"""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = """from datasets import logging\n"""
                elif "getLogger" in out_line:
                    out_line = out_line.replace("""getLogger""" ,"""get_logger""" )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line ,TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + """\n""" )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern ,replacement ,out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(R"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" ,out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
                    out_line = """from . import """ + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F"""Error converting {out_line.strip()}""" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(""".py""" ,"""""" )
                output_dir = os.path.join(abs_datasets_path ,dir_name )
                output_file = os.path.join(output_dir ,f_name )
                os.makedirs(output_dir ,exist_ok=True )
                self._logger.info(F"""Adding directory {output_dir}""" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file ,"""w""" ,encoding="""utf-8""" ) as f:
                f.writelines(out_lines )
            self._logger.info(F"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace(""".py""" ,"""""" )]
                self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(utils_file ,dest_folder )
            except KeyError:
                self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 36
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash :
    """simple docstring"""
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
        self.h = [0x6_7_4_5_2_3_0_1, 0xe_f_c_d_a_b_8_9, 0x9_8_b_a_d_c_f_e, 0x1_0_3_2_5_4_7_6, 0xc_3_d_2_e_1_f_0]
    @staticmethod
    def rotate ( n , b ):
        """simple docstring"""
        return ((n << b) | (n >> (32 - b))) & 0xf_f_f_f_f_f_f_f
    def padding ( self ):
        """simple docstring"""
        padding = b"""\x80""" + b"""\x00""" * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
        return padded_data
    def split_blocks ( self ):
        """simple docstring"""
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]
    def expand_block ( self , block ):
        """simple docstring"""
        w = list(struct.unpack(""">16L""" , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w
    def final_hash ( self ):
        """simple docstring"""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a, b, c, d, e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5_a_8_2_7_9_9_9
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6_e_d_9_e_b_a_1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8_f_1_b_b_c_d_c
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xc_a_6_2_c_1_d_6
                a, b, c, d, e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0xf_f_f_f_f_f_f_f,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xf_f_f_f_f_f_f_f,
                self.h[1] + b & 0xf_f_f_f_f_f_f_f,
                self.h[2] + c & 0xf_f_f_f_f_f_f_f,
                self.h[3] + d & 0xf_f_f_f_f_f_f_f,
                self.h[4] + e & 0xf_f_f_f_f_f_f_f,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash():
    msg = b"""Test String"""
    assert SHA1Hash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description="""Process some strings or files""" )
    parser.add_argument(
        """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , """utf-8""" )
    print(SHA1Hash(hash_input ).final_hash() )
if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
| 279
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node :
    '''simple docstring'''
    def __init__( self , pos_x , pos_y , goal_x , goal_y , parent ):
        """simple docstring"""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch :
    '''simple docstring'''
    def __init__( self , start , goal ):
        """simple docstring"""
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False
    def search ( self ) -> Path | None:
        """simple docstring"""
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors ( self , parent ) -> list[Node]:
        """simple docstring"""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors
    def retrace_path ( self , node ) -> Path:
        """simple docstring"""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch :
    '''simple docstring'''
    def __init__( self , start , goal ):
        """simple docstring"""
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
    def search ( self ) -> Path | None:
        """simple docstring"""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path ( self , fwd_node , bwd_node ) -> Path:
        """simple docstring"""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
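# Hedged usage sketch (editor addition): on the module-level `grid`, both classes return
# a list of (y, x) coordinates from start to goal, e.g.
#     path = BreadthFirstSearch((0, 0), (6, 6)).search()
#     assert path[0] == (0, 0) and path[-1] == (6, 6)
# Note that neither search keeps a visited set, so the queue can revisit cells; this is
# fine on small grids like the one above but grows quickly on larger ones.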
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 12
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print( name , val , spaces=0 ):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , """:""" , val.size() )
    else:
        print(msg , """:""" , val )
def fix_query_key_value_ordering( param , checkpoint_version , num_splits , num_heads , hidden_size ):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    param = param.view(*input_shape )
    return param
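# Hedged shape check (editor addition): for checkpoint_version >= 2.0 the function only
# reinterprets the packed QKV layout; the flat tensor shape is unchanged.
_demo_param = torch.zeros(3 * 16 * 64, 1024)  # (num_splits * num_heads * head_dim, hidden)
assert fix_query_key_value_ordering(_demo_param, 2.0, 3, 16, 64).shape == _demo_param.shape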
def convert_megatron_checkpoint( args , input_state_dict , config ):
# The converted output model.
SCREAMING_SNAKE_CASE__ : List[str] = {}
# old versions did not store training args
SCREAMING_SNAKE_CASE__ : List[str] = input_state_dict.get("""args""" , __lowerCAmelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
SCREAMING_SNAKE_CASE__ : List[Any] = ds_args.padded_vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = ds_args.max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = ds_args.hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = ds_args.num_layers
SCREAMING_SNAKE_CASE__ : Dict = ds_args.num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
SCREAMING_SNAKE_CASE__ : List[str] = config.n_head
# The hidden_size per head.
SCREAMING_SNAKE_CASE__ : str = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_state_dict["""checkpoint_version"""]
else:
SCREAMING_SNAKE_CASE__ : Tuple = 0.0
# The model.
SCREAMING_SNAKE_CASE__ : Any = input_state_dict["""model"""]
# The language model.
SCREAMING_SNAKE_CASE__ : Any = model["""language_model"""]
# The embeddings.
SCREAMING_SNAKE_CASE__ : str = lm["""embedding"""]
# The word embeddings.
SCREAMING_SNAKE_CASE__ : int = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
SCREAMING_SNAKE_CASE__ : Any = word_embeddings[: config.vocab_size, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = word_embeddings
# The position embeddings.
SCREAMING_SNAKE_CASE__ : Any = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
SCREAMING_SNAKE_CASE__ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
SCREAMING_SNAKE_CASE__ : List[Any] = pos_embeddings
# The transformer.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
SCREAMING_SNAKE_CASE__ : str = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
SCREAMING_SNAKE_CASE__ : str = layer_re.match(__lowerCAmelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
SCREAMING_SNAKE_CASE__ : Dict = int(m.group(1 ) )
# The name of the operation.
SCREAMING_SNAKE_CASE__ : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
SCREAMING_SNAKE_CASE__ : str = m.group(3 )
# The name of the layer.
SCREAMING_SNAKE_CASE__ : List[Any] = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
SCREAMING_SNAKE_CASE__ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
SCREAMING_SNAKE_CASE__ : List[Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
SCREAMING_SNAKE_CASE__ : Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = causal_mask
# Insert a "dummy" tensor for masked_bias.
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(-1E4 , dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : List[str] = masked_bias
SCREAMING_SNAKE_CASE__ : List[str] = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
SCREAMING_SNAKE_CASE__ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
SCREAMING_SNAKE_CASE__ : Dict = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
SCREAMING_SNAKE_CASE__ : Any = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase )
# Store. No change of shape.
SCREAMING_SNAKE_CASE__ : str = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
SCREAMING_SNAKE_CASE__ : str = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE__ : int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
SCREAMING_SNAKE_CASE__ : int = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE__ : Dict = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = transformer["""final_layernorm.weight"""]
SCREAMING_SNAKE_CASE__ : str = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
SCREAMING_SNAKE_CASE__ : Tuple = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__lowerCAmelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__lowerCAmelCase , help="""An optional config json file describing the pre-trained model.""" , )
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
# Extract the basename.
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(__lowerCAmelCase , map_location="""cpu""" )
else:
SCREAMING_SNAKE_CASE__ : str = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : int = input_state_dict.get("""args""" , __lowerCAmelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
SCREAMING_SNAKE_CASE__ : Dict = """gelu_fast"""
elif ds_args.openai_gelu:
SCREAMING_SNAKE_CASE__ : Optional[Any] = """gelu_new"""
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
SCREAMING_SNAKE_CASE__ : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = GPTaConfig(
vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__lowerCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__lowerCAmelCase , summary_activation=__lowerCAmelCase , summary_proj_to_labels=__lowerCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=__lowerCAmelCase , use_cache=__lowerCAmelCase , bos_token_id=5_0256 , eos_token_id=5_0256 , )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = GPTaConfig.from_json_file(args.config_file )
SCREAMING_SNAKE_CASE__ : Tuple = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_megatron_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__lowerCAmelCase , __lowerCAmelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
SCREAMING_SNAKE_CASE__ : Tuple = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
SCREAMING_SNAKE_CASE__ : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
SCREAMING_SNAKE_CASE__ : Any = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """gpt2"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = type(__lowerCAmelCase ).__name__
SCREAMING_SNAKE_CASE__ : Dict = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__lowerCAmelCase )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(__lowerCAmelCase )
# Store the state_dict to file.
SCREAMING_SNAKE_CASE__ : Any = os.path.join(__lowerCAmelCase , """pytorch_model.bin""" )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 12
| 1
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( nums: list ) -> float:
    if not nums:
        raise ValueError('List is empty' )
    return sum(nums ) / len(nums )
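# Hedged usage sketch (editor addition): the reconstructed parameter name `nums` follows
# the body above; the empty-list guard fires before the division.
assert UpperCAmelCase([1, 2, 3]) == 2.0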
if __name__ == "__main__":
import doctest
doctest.testmod()
| 594
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker( role_name ):
    iam_client = boto3.client('iam' )
    sagemaker_trust_policy = {
        'Version': '2012-10-17',
        'Statement': [
            {'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
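# Hedged mini-example (editor addition): both policies above are plain dicts serialized
# with json.dumps before being handed to the IAM API; a standalone round-trip:
_policy_demo = json.dumps({"Version": "2012-10-17", "Statement": []}, indent=2)
assert json.loads(_policy_demo)["Version"] == "2012-10-17"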
def _get_iam_role_arn( role_name ):
    iam_client = boto3.client('iam' )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input():
__lowerCamelCase : Any = _ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , A__ , )
__lowerCamelCase : str = None
if credentials_configuration == 0:
__lowerCamelCase : Union[str, Any] = _ask_field('Enter your AWS Profile name: [default] ' , default='default' )
__lowerCamelCase : Optional[int] = aws_profile
else:
print(
'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
__lowerCamelCase : str = _ask_field('AWS Access Key ID: ' )
__lowerCamelCase : Optional[Any] = aws_access_key_id
__lowerCamelCase : str = _ask_field('AWS Secret Access Key: ' )
__lowerCamelCase : str = aws_secret_access_key
__lowerCamelCase : Optional[int] = _ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
__lowerCamelCase : Any = aws_region
__lowerCamelCase : Optional[Any] = _ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , A__ , )
if role_management == 0:
__lowerCamelCase : Optional[Any] = _ask_field('Enter your IAM role name: ' )
else:
__lowerCamelCase : Union[str, Any] = 'accelerate_sagemaker_execution_role'
print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A__ )
__lowerCamelCase : List[Any] = _ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=A__ , error_message='Please enter yes or no.' , )
__lowerCamelCase : List[Any] = None
if is_custom_docker_image:
__lowerCamelCase : List[Any] = _ask_field('Enter your Docker image: ' , lambda A__ : str(A__ ).lower() )
__lowerCamelCase : Union[str, Any] = _ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=A__ , error_message='Please enter yes or no.' , )
__lowerCamelCase : List[Any] = None
if is_sagemaker_inputs_enabled:
__lowerCamelCase : str = _ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda A__ : str(A__ ).lower() , )
__lowerCamelCase : Tuple = _ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=A__ , error_message='Please enter yes or no.' , )
__lowerCamelCase : Optional[int] = None
if is_sagemaker_metrics_enabled:
__lowerCamelCase : int = _ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda A__ : str(A__ ).lower() , )
__lowerCamelCase : Union[str, Any] = _ask_options(
'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , )
__lowerCamelCase : Tuple = {}
__lowerCamelCase : int = _ask_field(
'Do you wish to optimize your script with torch dynamo?[yes/NO]:' , _convert_yes_no_to_bool , default=A__ , error_message='Please enter yes or no.' , )
if use_dynamo:
__lowerCamelCase : Dict = 'dynamo_'
__lowerCamelCase : List[str] = _ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__lowerCamelCase : List[str] = _ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=A__ , error_message='Please enter yes or no.' , )
if use_custom_options:
__lowerCamelCase : List[Any] = _ask_options(
'Which mode do you want to use?' , A__ , lambda A__ : TORCH_DYNAMO_MODES[int(A__ )] , default='default' , )
__lowerCamelCase : List[Any] = _ask_field(
'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=A__ , error_message='Please enter yes or no.' , )
__lowerCamelCase : str = _ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=A__ , error_message='Please enter yes or no.' , )
__lowerCamelCase : int = 'Which EC2 instance type you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
__lowerCamelCase : int = _ask_options(
A__ , A__ , lambda A__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__lowerCamelCase : Optional[int] = _ask_field(A__ , lambda A__ : str(A__ ).lower() , default='ml.p3.2xlarge' )
__lowerCamelCase : List[str] = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__lowerCamelCase : List[Any] = _ask_field(
'How many machines do you want use? [1]: ' , A__ , default=1 , )
__lowerCamelCase : List[str] = _ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=A__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A__ , use_cpu=A__ , dynamo_config=A__ , eca_instance_type=A__ , profile=A__ , region=A__ , iam_role_name=A__ , mixed_precision=A__ , num_machines=A__ , sagemaker_inputs_file=A__ , sagemaker_metrics_file=A__ , )
| 594
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mega"""] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
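# Hedged sketch (editor addition): _LazyModule defers the heavy torch imports until first
# attribute access. The same effect in plain PEP 562 terms would look roughly like this
# hypothetical module body:
#
#     import importlib
#     _SUBMODULES = {"MegaModel": ".modeling_mega", "MegaConfig": ".configuration_mega"}
#     def __getattr__(name):
#         if name in _SUBMODULES:
#             module = importlib.import_module(_SUBMODULES[name], __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")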
| 711
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'git_vision_model'
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('model_type' ) == "git":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'git'
    def __init__( self , vision_config=None , vocab_size=30522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict ( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
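# Hedged usage sketch (editor addition), assuming the class names reconstructed above:
#     config = GitConfig()  # builds a default GitVisionConfig internally
#     assert config.to_dict()["vision_config"]["model_type"] == "git_vision_model"
# to_dict() flattens the nested vision config so the composite config round-trips
# through JSON.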
| 308
| 0
|
def factorial( digit: int ) -> int:
    '''simple docstring'''
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy( number: int ) -> bool:
    '''simple docstring'''
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == number
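# Hedged check (editor addition): 145 = 1! + 4! + 5! is the classic Krishnamurthy
# (strong) number, while 10 is not.
assert krishnamurthy(145)
assert not krishnamurthy(10)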
if __name__ == "__main__":
print('''Program to check whether a number is a Krisnamurthy Number or not.''')
UpperCAmelCase_ = int(input('''Enter number: ''').strip())
print(
f"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
)
| 271
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson( function: str , starting_point: complex , variable: str = "x" , precision: float = 10**-10 , multiplicity: int = 1 , ) -> complex:
    '''simple docstring'''
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('''Could not find root''' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 271
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool ( PipelineTool ):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )
    def encode ( self , image: "Image" , question: str ):
        return self.pre_processor(image , question , return_tensors="pt" )
    def forward ( self , inputs ):
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode ( self , outputs ):
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
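# Hedged usage sketch (editor addition): PipelineTool wires encode -> forward -> decode,
# so calling the tool runs the processor, the VQA model, then the argmax label lookup.
# Downloading the checkpoint is required, hence left as comments:
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     print(tool(Image.open("cats.png"), "How many cats are shown?"))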
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
    '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bert_fast'''] = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bert'''] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_bert'''] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bert_tf'''] = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_bert'''] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 366
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "altclip_text_model"
    def __init__( self , vocab_size=250_002 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , initializer_factor=0.02 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , project_dim=768 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "altclip_vision_model"
    def __init__( self , hidden_size=768 , intermediate_size=3_072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('model_type' ) == "altclip":
__snake_case : Tuple = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
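
# Illustrative usage sketch (not part of the original module): composing the
# full AltCLIP config from the two sub-configs via the classmethod above, with
# the defaults defined in this file.
#
#     text_config = AltCLIPTextConfig()
#     vision_config = AltCLIPVisionConfig()
#     config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=768)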
| 576
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search over array[left:right]; returns index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted array; returns index of target or -1."""
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)

        # Split the current range [left, right] into three parts.
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on a sorted array; returns index of target or -1."""
    if left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)

        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
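
# Illustrative sanity check (not part of the original module; `_demo` is a
# hypothetical helper). Each pass discards at least a third of the range, so
# only O(log n) probes happen before the final linear scan over fewer than
# `precision` elements.
def _demo() -> None:
    example = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]
    assert ite_ternary_search(example, 256) == 7
    assert rec_ternary_search(0, len(example) - 1, example, 256) == 7
    assert ite_ternary_search(example, 3) == -1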
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at position: {result_ite}")
        print(f"Recursive search: {target} found at position: {result_rec}")
    else:
        print("Not found")
| 576
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
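
# Minimal sketch of the lazy-import idea behind `_LazyModule` (an assumption
# for illustration, not the actual transformers implementation): keep a
# name -> submodule lookup and import the submodule on first attribute access.
#
#     import importlib
#
#     class _LazySketch:
#         def __init__(self, import_structure):
#             self._reverse = {name: mod for mod, names in import_structure.items() for name in names}
#
#         def __getattr__(self, name):
#             module = importlib.import_module(f".{self._reverse[name]}", __package__)
#             return getattr(module, name)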
| 705
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
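
# Conceptual sketch of what LocalSGD does (an illustrative assumption, not the
# actual `accelerate` implementation): every `local_sgd_steps` optimizer steps,
# model parameters are averaged across all workers.
def _local_sgd_sync_sketch(model, step: int, local_sgd_steps: int) -> None:
    import torch.distributed as dist

    if dist.is_available() and dist.is_initialized() and step % local_sgd_steps == 0:
        world_size = dist.get_world_size()
        for param in model.parameters():
            dist.all_reduce(param.data)  # SUM across workers by default
            param.data /= world_size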
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
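
# Example launch commands (assumed file name and flags, following the pattern
# of the accelerate examples; not part of the original script):
#
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2
#   accelerate launch local_sgd.py --mixed_precision fp16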
| 388
| 0
|
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase):
__SCREAMING_SNAKE_CASE : Dict = ["""flax""", """transformers"""]
def __init__( self : Optional[int] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : str ):
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Any , **__UpperCamelCase : List[str] ):
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : List[str] , **__UpperCamelCase : Dict ):
requires_backends(cls , ["flax", "transformers"] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase):
__SCREAMING_SNAKE_CASE : List[Any] = ["""flax""", """transformers"""]
def __init__( self : str , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Tuple ):
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[Any] ):
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : str ):
requires_backends(cls , ["flax", "transformers"] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase):
__SCREAMING_SNAKE_CASE : Tuple = ["""flax""", """transformers"""]
def __init__( self : Dict , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[int] ):
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : str , **__UpperCamelCase : str ):
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : str ):
requires_backends(cls , ["flax", "transformers"] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase):
__SCREAMING_SNAKE_CASE : List[Any] = ["""flax""", """transformers"""]
def __init__( self : str , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : List[str] ):
requires_backends(self , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : str , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[Any] ):
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Optional[int] ):
requires_backends(cls , ["flax", "transformers"] )
| 684
|
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # the distance from a node to itself is zero

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
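    # Sanity check (illustrative, not in the original file; distances computed
    # by hand from the edges above): 1 -> 4 goes via 3, and 0 -> 3 goes via 2.
    assert graph.show_min(1, 4) == 11  # 1 -> 3 (5) + 3 -> 4 (6)
    assert graph.show_min(0, 3) == 16  # 0 -> 2 (9) + 2 -> 3 (7)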
| 684
| 1
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 711
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 6_3_7_8_1_3_7
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float:
lowerCAmelCase__ : str = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
lowerCAmelCase__ : Optional[int] = atan((1 - flattening) * tan(radians(__UpperCAmelCase ) ) )
lowerCAmelCase__ : List[Any] = atan((1 - flattening) * tan(radians(__UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
lowerCAmelCase__ : Any = haversine_distance(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
lowerCAmelCase__ : int = (b_lata + b_lata) / 2
lowerCAmelCase__ : Any = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
lowerCAmelCase__ : Optional[int] = (sin(__UpperCAmelCase ) ** 2) * (cos(__UpperCAmelCase ) ** 2)
lowerCAmelCase__ : Dict = cos(sigma / 2 ) ** 2
lowerCAmelCase__ : Union[str, Any] = (sigma - sin(__UpperCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
lowerCAmelCase__ : Tuple = (cos(__UpperCAmelCase ) ** 2) * (sin(__UpperCAmelCase ) ** 2)
lowerCAmelCase__ : int = sin(sigma / 2 ) ** 2
lowerCAmelCase__ : int = (sigma + sin(__UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 507
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1_024,
        encoder_layers=6,
        encoder_ffn_dim=1_024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1_024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 545
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
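
# Illustrative usage sketch (the dataset features and label names below are
# assumptions): aligning the template with a dataset's features swaps the
# placeholder ClassLabel for the dataset's own label feature.
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification(image_column="image", label_column="labels")
#     task = task.align_with_features(features)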
| 55
| 0
|
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
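    # Worked example (illustrative, not in the original file): one 90-degree
    # counterclockwise rotation of the default 4x4 matrix turns the original
    # last column [4, 8, 12, 16] into the new first row.
    assert rotate_90(make_matrix())[0] == [4, 8, 12, 16]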
| 494
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
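
# Illustrative usage sketch (the image path is an assumption):
#
#     tool = ImageSegmentationTool()
#     mask = tool(Image.open("cat.png"), "cat")  # returns a binary PIL mask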
| 494
| 1
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
_UpperCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , __snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ = training_args.get_process_log_level()
logger.setLevel(__snake_case )
datasets.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
snake_case_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
snake_case_ = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
snake_case_ = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = train_dataset.features['label'].names
if training_args.do_eval:
snake_case_ = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = eval_dataset.features['label'].names
if training_args.do_predict:
snake_case_ = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = predict_dataset.features['label'].names
# Labels
snake_case_ = len(__snake_case )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , idalabel={str(__snake_case ): label for i, label in enumerate(__snake_case )} , labelaid={label: i for i, label in enumerate(__snake_case )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
snake_case_ = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case_ = False
def preprocess_function(UpperCamelCase__ ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=__snake_case , max_length=data_args.max_seq_length , truncation=__snake_case , )
if training_args.do_train:
if data_args.max_train_samples is not None:
snake_case_ = min(len(__snake_case ) , data_args.max_train_samples )
snake_case_ = train_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
snake_case_ = train_dataset.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(__snake_case ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
snake_case_ = min(len(__snake_case ) , data_args.max_eval_samples )
snake_case_ = eval_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
snake_case_ = eval_dataset.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
snake_case_ = min(len(__snake_case ) , data_args.max_predict_samples )
snake_case_ = predict_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
snake_case_ = predict_dataset.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
snake_case_ = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(UpperCamelCase__ ):
snake_case_ = p.predictions[0] if isinstance(p.predictions , __snake_case ) else p.predictions
snake_case_ = np.argmax(__snake_case , axis=1 )
return metric.compute(predictions=__snake_case , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case_ = default_data_collator
elif training_args.fpaa:
snake_case_ = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 )
else:
snake_case_ = None
# Initialize our Trainer
snake_case_ = Trainer(
model=__snake_case , args=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__snake_case , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
snake_case_ = None
if training_args.resume_from_checkpoint is not None:
snake_case_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ = last_checkpoint
snake_case_ = trainer.train(resume_from_checkpoint=__snake_case )
snake_case_ = train_result.metrics
snake_case_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
snake_case_ = min(__snake_case , len(__snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , __snake_case )
trainer.save_metrics('train' , __snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate(eval_dataset=__snake_case )
snake_case_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__snake_case )
snake_case_ = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('eval' , __snake_case )
trainer.save_metrics('eval' , __snake_case )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
snake_case_ , snake_case_ , snake_case_ = trainer.predict(__snake_case , metric_key_prefix='predict' )
snake_case_ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__snake_case )
)
snake_case_ = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('predict' , __snake_case )
trainer.save_metrics('predict' , __snake_case )
snake_case_ = np.argmax(__snake_case , axis=1 )
snake_case_ = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(__snake_case , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(__snake_case ):
snake_case_ = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
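
# Example invocation (assumed, patterned on the other text-classification
# examples; not part of the original script):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli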
| 362
|
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
_A = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCamelCase__ ) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
_A = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCamelCase__ ) )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_A = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCamelCase__ ) )
def __UpperCAmelCase ( self : int ) -> Dict:
_A = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCamelCase__ ) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
_A = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCamelCase__ ) )
def __UpperCAmelCase ( self : str ) -> Dict:
_A = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_A = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
_A = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_A = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# pass variant but use the non-variant filenames
_A = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
_A = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
_A = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_A = 'fp16'
self.assertFalse(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
_A = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
_A = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )
def __UpperCAmelCase ( self : Tuple ) -> str:
# pass variant but use the non-variant filenames
_A = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
_A = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )
def __UpperCAmelCase ( self : List[Any] ) -> int:
_A = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
        # Removed: 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_A = 'fp16'
self.assertFalse(is_safetensors_compatible(UpperCamelCase__, variant=UpperCamelCase__ ) )
| 107
| 0
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCAmelCase = get_tests_dir("""fixtures""")
_lowerCAmelCase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_lowerCAmelCase = get_tests_dir("""fixtures/dummy-config.json""")
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = 0
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : int = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_lowerCAmelCase : Any = AutoFeatureExtractor.from_pretrained(_A ).to_dict()
config_dict.pop('feature_extractor_type' )
_lowerCAmelCase : str = WavaVecaFeatureExtractor(**_A )
# save in new folder
model_config.save_pretrained(_A )
config.save_pretrained(_A )
_lowerCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(_A )
# make sure private variable is not incorrectly saved
_lowerCAmelCase : Dict = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A ,'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained('bert-base' )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A ,r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_lowerCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A ,revision='aaaaaa' )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,):
_lowerCAmelCase : str = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(_A ):
_lowerCAmelCase : List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
_lowerCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=_A )
_lowerCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
_lowerCAmelCase : str = AutoFeatureExtractor.from_pretrained(_A ,trust_remote_code=_A )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
def __lowerCamelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register('custom' ,_A )
AutoFeatureExtractor.register(_A ,_A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoFeatureExtractor.register(_A ,_A )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCAmelCase : Tuple = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
_lowerCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self ):
'''simple docstring'''
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = True
try:
AutoConfig.register('custom' ,_A )
AutoFeatureExtractor.register(_A ,_A )
# If remote code is not set, the default is to use local
_lowerCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_lowerCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_lowerCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(not hasattr(_A ,'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 16
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
        _lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,y )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Tuple = self.act(self.wi_0(_A ) )  # gated branch
        _lowerCAmelCase : Optional[int] = self.wi_1(_A )  # linear branch, a projection distinct from the gate
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
| 16
| 1
|
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: list[int] ):
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty" )
snake_case : Any = sum(lowerCamelCase_ ) / len(lowerCamelCase_ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
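# A quick hand check of the function above (a sketch; `mean_absolute_deviation`
# is an assumed readable alias for the obfuscated def, not the original name):
def mean_absolute_deviation(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)
    return sum(abs(x - average) for x in nums) / len(nums)

# For [1, 2, 3, 4]: average = 2.5, deviations = [1.5, 0.5, 0.5, 1.5], MAD = 1.0
assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0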
| 449
|
"""simple docstring"""
from __future__ import annotations
A = '#'
class _a :
def __init__( self : List[Any] ) -> None:
snake_case : dict = {}
def __lowercase ( self : str , _lowercase : str ) -> None:
snake_case : Optional[int] = self._trie
for char in text:
if char not in trie:
snake_case : str = {}
snake_case : Optional[Any] = trie[char]
snake_case : str = True
def __lowercase ( self : List[Any] , _lowercase : str ) -> tuple | list:
snake_case : Dict = self._trie
for char in prefix:
if char in trie:
snake_case : Tuple = trie[char]
else:
return []
        return self._elements(trie )
def __lowercase ( self : Optional[int] , _lowercase : dict ) -> tuple:
snake_case : int = []
for c, v in d.items():
            snake_case : int = [" "] if c == END else [(c + s) for s in self._elements(v )]
result.extend(_lowercase )
return tuple(_lowercase )
A = Trie()
A = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: str ):
"""simple docstring"""
snake_case : Optional[int] = trie.find_word(lowerCamelCase_ )
return tuple(string + word for word in suffixes )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
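# What the lookup above produces (assuming the Trie instance and the word tuple
# are the objects bound to the obfuscated name A). Each suffix carries a
# trailing space because the END sentinel is rendered as " " in _elements():
#
#   autocomplete_using_trie("de")
#   -> ('depart ', 'detergent ', 'deer ', 'deal ')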
| 449
| 1
|
from __future__ import annotations
import math
from collections.abc import Callable
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 100 , ):
'''simple docstring'''
    lowerCamelCase : Any = x_start  # xa: left endpoint of the current segment
    lowerCamelCase : Optional[Any] = fnc(x_start )  # fxa
    lowerCamelCase : Optional[Any] = 0.0  # length
    for _ in range(steps ):
        # Approximates the curve as a sequence of straight segments and sums their lengths
        lowerCamelCase : List[Any] = (x_end - x_start) / steps + xa  # xb: right endpoint
        lowerCamelCase : List[str] = fnc(xb )  # fxb
        length += math.hypot(xb - xa , fxb - fxa )
        # Advance to the next segment
        lowerCamelCase : Optional[Any] = xb  # xa = xb
        lowerCamelCase : List[str] = fxb  # fxa = fxb
return length
if __name__ == "__main__":
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
_snake_case = 10
while i <= 10_00_00:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
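# Sanity check for the segment-sum approximation above (a self-contained
# sketch; `segment_sum_length` is an assumed readable name, not the original):
import math

def segment_sum_length(fnc, x_start: float, x_end: float, steps: int = 100) -> float:
    xa, fxa, length = x_start, fnc(x_start), 0.0
    for _ in range(steps):
        xb = (x_end - x_start) / steps + xa  # right endpoint of this segment
        fxb = fnc(xb)
        length += math.hypot(xb - xa, fxb - fxa)
        xa, fxa = xb, fxb
    return length

# A straight line is its own chord, so a single step is already exact:
assert abs(segment_sum_length(lambda x: x, 0.0, 1.0, steps=1) - math.sqrt(2)) < 1e-12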
| 703
|
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
lowerCamelCase : Tuple = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
lowerCamelCase : Any = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
    lowerCamelCase : Any = max(len(a_binary ) , len(b_binary ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
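# The string construction above should agree with Python's built-in bitwise
# operator; a minimal cross-check (readable names assumed for this sketch):
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary, b_binary = bin(a)[2:], bin(b)[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )

assert int(binary_and(25, 32), 2) == 25 & 32
assert int(binary_and(37, 50), 2) == 37 & 50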
| 231
| 0
|
from __future__ import annotations
def _UpperCAmelCase ( UpperCamelCase: list , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
__lowerCAmelCase = []
__lowerCAmelCase , __lowerCAmelCase = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__lowerCAmelCase = result + left + right
return input_list
def _UpperCAmelCase ( UpperCamelCase: list ):
"""simple docstring"""
if len(UpperCamelCase ) <= 1:
return input_list
__lowerCAmelCase = list(UpperCamelCase )
# iteration for two-way merging
__lowerCAmelCase = 2
while p <= len(UpperCamelCase ):
# getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(UpperCamelCase ) , p ):
__lowerCAmelCase = i
__lowerCAmelCase = i + p - 1
__lowerCAmelCase = (low + high + 1) // 2
__lowerCAmelCase = merge(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# final merge of last two parts
if p * 2 >= len(UpperCamelCase ):
__lowerCAmelCase = i
__lowerCAmelCase = merge(UpperCamelCase , 0 , UpperCamelCase , len(UpperCamelCase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
UpperCamelCase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
UpperCamelCase_ = []
else:
UpperCamelCase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
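# A readable sketch of the same bottom-up merge sort, assuming the obfuscated
# assignment in merge() stands in for the slice write back into the list:
def merge_readable(xs: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = xs[low:mid], xs[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    xs[low : high + 1] = result + left + right  # write the merged run back
    return xs

def iter_merge_sort_readable(items: list) -> list:
    if len(items) <= 1:
        return list(items)
    xs = list(items)
    p = 2  # current run width for two-way merging
    while p <= len(xs):
        for i in range(0, len(xs), p):
            low, high = i, i + p - 1
            mid = (low + high + 1) // 2
            xs = merge_readable(xs, low, mid, high)
        if p * 2 >= len(xs):  # final merge of the last two runs
            xs = merge_readable(xs, 0, i, len(xs) - 1)
            break
        p *= 2
    return xs

assert iter_merge_sort_readable([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]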
| 611
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 611
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCAmelCase_ , )
assert hasattr(self , '''env''' )
def __snake_case ( self , UpperCAmelCase_ ):
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase = {
'''enabled''': True,
'''processes_per_host''': 8,
}
lowerCAmelCase = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
lowerCAmelCase = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
lowerCAmelCase = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=UpperCAmelCase_ , instance_type=self.instance_type , debugger_hook_config=UpperCAmelCase_ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=UpperCAmelCase_ , py_version='''py36''' , )
def __snake_case ( self , UpperCAmelCase_ ):
TrainingJobAnalytics(UpperCAmelCase_ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __snake_case ( self , UpperCAmelCase_ ):
# create estimator
lowerCAmelCase = self.create_estimator(UpperCAmelCase_ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCAmelCase_ )
| 717
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCAmelCase ( _snake_case = 3 ):
    if isinstance(_snake_case , str ):
        raise TypeError('''number of qubits must be an integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(_snake_case ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate (>10).''' )
lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' )
lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' )
lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case )
lowerCAmelCase = number_of_qubits
    for i in range(number_of_qubits ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_snake_case , _snake_case )
# simulate with 10000 shots
lowerCAmelCase = Aer.get_backend('''qasm_simulator''' )
lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 )
return job.result().get_counts(_snake_case )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 33
| 0
|
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
A__ = _modexpt(__UpperCamelCase , exponent // 2 , __UpperCamelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__UpperCamelCase , exponent - 1 , __UpperCamelCase )) % modulo_value
def A ( __UpperCamelCase = 1_777 , __UpperCamelCase = 1_855 , __UpperCamelCase = 8 ) -> int:
A__ = base
for _ in range(1 , __UpperCamelCase ):
        A__ = _modexpt(base , result , 10**digits )
return result
if __name__ == "__main__":
print(f'{solution() = }')
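# The recursive square-and-multiply above should agree with Python's built-in
# three-argument pow; a quick cross-check (readable alias for this sketch):
def modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    return (base * modexpt(base, exponent - 1, modulo_value)) % modulo_value

for b, e, m in [(3, 7, 10), (2, 1000, 997), (1777, 1855, 10**8)]:
    assert modexpt(b, e, m) == pow(b, e, m)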
| 9
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
UpperCAmelCase_ = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 539
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A =logging.get_logger(__name__)
class _snake_case ( __UpperCAmelCase ):
lowerCAmelCase :Optional[Any] = ['''input_features''', '''attention_mask''']
def __init__( self , _lowerCamelCase=80 , _lowerCamelCase=1_6000 , _lowerCamelCase=80 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , **_lowerCamelCase , ):
super().__init__(feature_size=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , padding_value=lowerCAmelCase_ , **lowerCAmelCase_)
UpperCAmelCase__ : List[str] = num_mel_bins
UpperCAmelCase__ : str = do_ceptral_normalize
UpperCAmelCase__ : Tuple = normalize_means
UpperCAmelCase__ : Dict = normalize_vars
UpperCAmelCase__ : Dict = True
def snake_case__ ( self , _lowerCamelCase , ):
UpperCAmelCase__ : str = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
UpperCAmelCase__ : Any = torch.from_numpy(lowerCAmelCase_).unsqueeze(0)
UpperCAmelCase__ : Optional[Any] = ta_kaldi.fbank(lowerCAmelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate)
return features.numpy()
@staticmethod
def snake_case__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = 0.0 , ):
if normalize_means:
UpperCAmelCase__ : int = x[:input_length].mean(axis=0)
UpperCAmelCase__ : Optional[Any] = np.subtract(lowerCAmelCase_ , lowerCAmelCase_)
if normalize_vars:
UpperCAmelCase__ : List[Any] = x[:input_length].std(axis=0)
UpperCAmelCase__ : int = np.divide(lowerCAmelCase_ , lowerCAmelCase_)
if input_length < x.shape[0]:
UpperCAmelCase__ : List[str] = padding_value
# make sure array is in float32
UpperCAmelCase__ : List[str] = x.astype(np.floataa)
return x
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
UpperCAmelCase__ : Tuple = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowerCAmelCase_ , lowerCAmelCase_ , self.normalize_means , self.normalize_vars , self.padding_value)
for x, n in zip(lowerCAmelCase_ , lowerCAmelCase_)
]
def __call__( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''')
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
UpperCAmelCase__ : Dict = isinstance(lowerCAmelCase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
UpperCAmelCase__ : Optional[Any] = is_batched_numpy or (
isinstance(lowerCAmelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
UpperCAmelCase__ : Dict = [np.asarray(lowerCAmelCase_ , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase_ , np.ndarray):
UpperCAmelCase__ : Dict = np.asarray(lowerCAmelCase_ , dtype=np.floataa)
elif isinstance(lowerCAmelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
UpperCAmelCase__ : List[Any] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
UpperCAmelCase__ : Tuple = [raw_speech]
# extract fbank features
UpperCAmelCase__ : str = [self._extract_fbank_features(lowerCAmelCase_) for waveform in raw_speech]
# convert into correct format for padding
UpperCAmelCase__ : Dict = BatchFeature({"""input_features""": features})
UpperCAmelCase__ : int = self.pad(
lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
# make sure list is in array format
UpperCAmelCase__ : Optional[Any] = padded_inputs.get("""input_features""")
if isinstance(input_features[0] , lowerCAmelCase_):
UpperCAmelCase__ : Dict = [np.asarray(lowerCAmelCase_ , dtype=np.floataa) for feature in input_features]
UpperCAmelCase__ : Any = padded_inputs.get("""attention_mask""")
if attention_mask is not None:
UpperCAmelCase__ : Dict = [np.asarray(lowerCAmelCase_ , dtype=np.intaa) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
UpperCAmelCase__ : Dict = (
np.array(lowerCAmelCase_ , dtype=np.intaa)
if self._get_padding_strategies(lowerCAmelCase_ , max_length=lowerCAmelCase_) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCAmelCase__ : str = self.normalize(
padded_inputs["""input_features"""] , attention_mask=lowerCAmelCase_)
if return_tensors is not None:
UpperCAmelCase__ : Any = padded_inputs.convert_to_tensors(lowerCAmelCase_)
return padded_inputs
| 712
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__A =object()
# For specifying empty leaf dict `{}`
__A =object()
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Union[str, Any] = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
UpperCAmelCase__ : Optional[Any] = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def _UpperCamelCase ( UpperCamelCase__ ):
def replace(UpperCamelCase__ , UpperCamelCase__ ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def _UpperCamelCase ( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P("""mp""" , UpperCamelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""" , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""" , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : Optional[int] = _get_partition_rules()
UpperCAmelCase__ : Optional[int] = _replacement_rules(UpperCamelCase__ )
UpperCAmelCase__ : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
UpperCAmelCase__ : Tuple = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
| 113
| 0
|
"""simple docstring"""
from jiwer import compute_measures
import datasets
a : Dict = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
a : Optional[int] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
a : int = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def __a ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def __a ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False ) -> Union[str, Any]:
if concatenate_texts:
return compute_measures(lowerCAmelCase__ , lowerCAmelCase__ )["wer"]
else:
a : Any = 0
a : Optional[int] = 0
for prediction, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Optional[Any] = compute_measures(lowerCAmelCase__ , lowerCAmelCase__ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
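# A hand computation of WER = (S + D + I) / N via word-level edit distance,
# reproducing the docstring example above (a sketch, independent of jiwer):
def word_edit_distance(ref: list, hyp: list) -> int:
    # Levenshtein DP over words: d[i][j] = edits turning ref[:i] into hyp[:j].
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost)
    return d[-1][-1]

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
errors = sum(word_edit_distance(r.split(), p.split()) for r, p in zip(references, predictions))
n_words = sum(len(r.split()) for r in references)
assert errors / n_words == 0.5  # matches the docstring example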
| 633
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 633
| 1
|
"""simple docstring"""
import os
def _lowerCamelCase ( ):
    with open(os.path.dirname(__file__ ) + '''/p022_names.txt''' ) as file:
SCREAMING_SNAKE_CASE_ = str(file.readlines()[0] )
SCREAMING_SNAKE_CASE_ = names.replace('''"''', '''''' ).split(''',''' )
names.sort()
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
    for i, name in enumerate(names ):
for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
SCREAMING_SNAKE_CASE_ = 0
return total_score
if __name__ == "__main__":
print(solution())
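# Worked example of the scoring rule above: "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53, and at (1-based) position 938 in the sorted list
# it would contribute 938 * 53 = 49714 (the classic Project Euler 22 example):
def name_score(name: str) -> int:
    return sum(ord(letter) - 64 for letter in name)  # "A" -> 1, ..., "Z" -> 26

assert name_score("COLIN") == 53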
| 628
|
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = TransfoXLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''<unk> UNwanted , running'''
SCREAMING_SNAKE_CASE_ = '''<unk> unwanted, running'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [0, 4, 8, 7] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
SCREAMING_SNAKE_CASE_ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 628
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCamelCase__ : int = None
lowerCamelCase__ : Any = logging.get_logger(__name__)
lowerCamelCase__ : str = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ : Union[str, Any] = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCamelCase__ : Optional[Any] = {
'facebook/nllb-large-en-ro': 1_024,
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
lowerCamelCase__ : List[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = ["input_ids", "attention_mask"]
lowercase_ = NllbTokenizer
lowercase_ = []
lowercase_ = []
def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Union[str, Any]="<s>" , _lowerCAmelCase : Tuple="</s>" , _lowerCAmelCase : Any="</s>" , _lowerCAmelCase : Union[str, Any]="<s>" , _lowerCAmelCase : Dict="<unk>" , _lowerCAmelCase : int="<pad>" , _lowerCAmelCase : Optional[int]="<mask>" , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Union[str, Any]=False , **_lowerCAmelCase : List[str] , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE_ = legacy_behaviour
super().__init__(
vocab_file=_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , src_lang=_lowerCAmelCase , tgt_lang=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , legacy_behaviour=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
SCREAMING_SNAKE_CASE_ = {
lang_code: self.convert_tokens_to_ids(_lowerCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE_ = src_lang if src_lang is not None else 'eng_Latn'
SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCAmelCase_ ( self : Tuple ):
return self._src_lang
@src_lang.setter
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] , _lowerCAmelCase : Optional[str] , **_lowerCAmelCase : Optional[Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
SCREAMING_SNAKE_CASE_ = src_lang
SCREAMING_SNAKE_CASE_ = self(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = tgt_lang_id
return inputs
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str = "eng_Latn" , _lowerCAmelCase : Optional[List[str]] = None , _lowerCAmelCase : str = "fra_Latn" , **_lowerCAmelCase : List[str] , ):
SCREAMING_SNAKE_CASE_ = src_lang
SCREAMING_SNAKE_CASE_ = tgt_lang
return super().prepare_seqaseq_batch(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase_ ( self : Any ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
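
# For context, a minimal usage sketch of the language-code plumbing above:
# `set_src_lang_special_tokens` rewrites the prefix/suffix special tokens, and
# `_build_translation_inputs` forwards the target language as `forced_bos_token_id`.
# The checkpoint name below is an illustrative assumption, not taken from this fragment.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#     inputs = tokenizer("Hello, world!", return_tensors="pt")
#     # The target-language token id is what generation receives as `forced_bos_token_id`,
#     # mirroring `_build_translation_inputs` above.
#     fra_id = tokenizer.convert_tokens_to_ids("fra_Latn")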
| 31
|
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    """Split the input on any character that is not a letter, digit, or whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Capitalize every word and join them without separators (PascalCase)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of each fragment with `separator`, upper- or lower-cased."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 93
| 0
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
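
# For reference, the registration pattern these tests exercise looks roughly like
# this in user code. This is a hedged sketch: `MyConfig` and `MyImageProcessor` are
# hypothetical names, and `BaseImageProcessor` is assumed to be available in this
# version of transformers.
#
#     from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
#     from transformers.image_processing_utils import BaseImageProcessor
#
#     class MyConfig(PretrainedConfig):
#         model_type = "my-model"  # hypothetical model type
#
#     class MyImageProcessor(BaseImageProcessor):
#         pass
#
#     AutoConfig.register("my-model", MyConfig)
#     AutoImageProcessor.register(MyConfig, MyImageProcessor)
#     # From here on, AutoImageProcessor.from_pretrained(...) can resolve MyImageProcessor
#     # for checkpoints whose config declares model_type "my-model".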
| 717
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """Constructs a ViLT processor which wraps a ViLT image processor and a BERT tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
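
# A minimal usage sketch of this processor. The checkpoint name and image URL are
# illustrative assumptions, not taken from the file above.
#
#     import requests
#     from PIL import Image
#     from transformers import ViltProcessor
#
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     encoding = processor(image, "How many cats are there?", return_tensors="pt")
#     # `encoding` holds input_ids/attention_mask from the tokenizer plus
#     # pixel_values/pixel_mask from the image processor, merged via encoding.update(...).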
| 558
| 0
|