code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the ConvBERT model (standard transformers pattern):
# `_import_structure` maps each submodule to the public names it exports, and the
# module is replaced by a _LazyModule proxy so heavy backends load on first access.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

# Fast tokenizer is only exported when the `tokenizers` library is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

# PyTorch modeling classes, only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

# TensorFlow modeling classes, only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they resolve lazily.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__(AbstractDatasetInputStream):
    """Dataset input stream that builds a dataset from a Python generator callable.

    NOTE(review): the original base class name was obfuscated to `__snake_case`
    (undefined); `AbstractDatasetInputStream` is the class imported above — confirm.
    """

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Builder that materializes the generator into an Arrow dataset.
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Return the 'train' split, streamed or fully prepared depending on `streaming`."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 38
| 0
|
def A_ ( _lowerCAmelCase , _lowerCAmelCase ):
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ) , end="\t" )
else:
print("INF" , end="\t" )
print()
def floyd_warshall(graph, v):
    """All-pairs shortest paths over a v x v adjacency matrix (inf = no edge).

    Prints the result matrix and returns (dist, v). Renamed from the obfuscated
    `A_` to `floyd_warshall`, the name used by the __main__ block below.
    """
    # Start from a copy of the adjacency matrix.
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    # Interactive driver: read the graph edge-by-edge, then run Floyd-Warshall.
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    # Distance from a vertex to itself is zero.
    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 718
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A_(_lowerCAmelCase):  # picklable for multiprocessing
    """Return the sum of the argument's elements via its ``.sum()`` method (e.g. a numpy array)."""
    # Original body referenced an undefined `x`; sum the actual parameter.
    return _lowerCAmelCase.sum()
def A_(_lowerCAmelCase):  # picklable for multiprocessing
    """Return the argument plus one."""
    # Original body referenced an undefined `i`; use the actual parameter.
    return _lowerCAmelCase + 1
@dataclass
class A__:
    """Simple (int, str) record used by the asdict tests below.

    Field names restored from the test usage `A(x=1, y="foobar")` further down;
    the original had two fields with the same obfuscated name, which is invalid.
    """

    x: int
    y: str
class A__ ( __snake_case ):
    # Test suite for `map_nested`, `zip_dict` and `temporary_assignment`.
    # NOTE(review): the base name `__snake_case` is undefined in this file — it looks
    # like obfuscation damage; presumably this should be `TestCase` (imported above). Confirm.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        # Fixtures: inputs of increasing nesting depth for map_nested.
        UpperCamelCase : Optional[int] = {}
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : List[Any] = 1
        UpperCamelCase : Tuple = [1, 2]
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]}
        UpperCamelCase : Any = {"a": {"1": 1}, "b": 2}
        UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4}
        # Expected outputs after mapping +1 over each leaf.
        UpperCamelCase : Dict = {}
        UpperCamelCase : Any = []
        UpperCamelCase : Any = 2
        UpperCamelCase : Any = [2, 3]
        UpperCamelCase : Optional[Any] = {"a": 2, "b": 3}
        UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]}
        UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3}
        UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
        # NOTE(review): every local above is bound to the single name `UpperCamelCase`,
        # and the calls below reference `A_`, which is not defined in this scope — this
        # method cannot run as written (obfuscation damage); restore names before use.
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        # Same checks again, but parallelized with two worker processes.
        UpperCamelCase : List[str] = 2
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        # numpy leaves: map_numpy toggles whether arrays are mapped elementwise.
        UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        UpperCamelCase : int = {"a": 2, "b": 0, "c": 2}
        UpperCamelCase : Union[str, Any] = {
            "a": np.eye(2 ).astype(A_ ),
            "b": np.zeros(3 ).astype(A_ ),
            "c": np.ones(2 ).astype(A_ ),
        }
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(A_ ):  # can't pickle a local lambda
            map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )
    def __UpperCamelCase( self ):
        '''simple docstring'''
        # zip_dict merges per-key values from several dicts into tuples.
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : List[Any] = {"a": 3, "b": 4}
        UpperCamelCase : Tuple = {"a": 5, "b": 6}
        UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
    def __UpperCamelCase( self ):
        '''simple docstring'''
        # temporary_assignment must restore the attribute when the context exits.
        class A__ :
            _UpperCAmelCase :str = 'bar'
        UpperCamelCase : List[Any] = Foo()
        # NOTE(review): `Foo` and `foo` are undefined here — presumably the inner class
        # above was originally named Foo and the instance bound to `foo`. Confirm.
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(A_ , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def A_(iterable_length, num_proc, expected_num_proc):
    """map_nested should only spin up a Pool when the input is large enough.

    Parameter names restored from the parametrize string above; the original had
    three identical obfuscated parameter names, which is a SyntaxError.
    """
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"""{i}""": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A__(TestCase):
    """Checks that temp_seed(...) makes TF / PyTorch / NumPy RNG reproducible.

    Restored from obfuscation damage: the three methods shared one name (so two
    were shadowed) and the out1/out2/out3 locals were all bound to a single name.
    """

    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        # Same seed twice -> identical outputs; reseeding absent -> different output.
        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def A_(input_data):
    """NestedDataStructure must expose the wrapped value unchanged via `.data`."""
    # Parameter name restored from the parametrize string above.
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def A_(data, expected_output):
    """NestedDataStructure.flatten must yield all leaves in order, at any nesting depth."""
    # Parameter names restored from the parametrize string above.
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def A_():
    """asdict must recurse through dataclasses nested in dicts/lists, and reject non-dataclasses."""
    # NOTE(review): the original referenced a class `A` that does not exist in this
    # file; `A__` below is the (x, y) dataclass defined earlier — confirm the mapping.
    obj = A__(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(obj) == expected_output

    nested = {"a": {"b": A__(x=10, y="foo")}, "c": [A__(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(nested) == expected_output

    # A bare list containing a non-dataclass element is rejected.
    with pytest.raises(TypeError):
        asdict([1, A__(x=10, y="foo")])
def A_ ( _lowerCAmelCase ) -> Tuple:
return text.split()
def A_ ( _lowerCAmelCase ) -> Dict:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def A_():
    """iflatmap_unordered must flatten worker outputs and stream them eagerly."""
    # Locals restored from obfuscation damage: the pool and output list were
    # created but never passed/appended under their real names.
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 38
| 0
|
def A_(a: str, b: str) -> bool:
    """Return True if *a* can be turned into *b* by uppercasing some lowercase
    letters and deleting the remaining lowercase letters (HackerRank 'Abbreviation').

    Dynamic programme: dp[i][j] is True when a[:i] can produce b[:j]. The original
    had two identical obfuscated parameter names (a SyntaxError) and never wrote
    into the dp table; names restored from the body's own references.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Match a[i] (possibly uppercased) against b[j] ...
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # ... or delete a[i] if it is lowercase.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 719
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['note_seq']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["note_seq"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
| 38
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger — `main()` below references it by this name.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Field names restored from the `metadata={'help': ...}` strings and from how
    `main()` below accesses them (obfuscation had collapsed them all to one name).
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        """Assemble the `data_files` mapping consumed by `load_dataset` in main()."""
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        # None (not an empty dict) when no explicit folders were given.
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train.

    Field names restored from the help strings and from `model_args.*` accesses in
    main(). NOTE(review): the obfuscation erased the literal defaults marked below —
    False for use_auth_token and True for norm_pix_loss match upstream run_mae; confirm.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """TrainingArguments plus a base learning rate that main() scales by batch size.

    Class name restored from the HfArgumentParser call in main(); the obfuscated
    base `__SCREAMING_SNAKE_CASE` was undefined — TrainingArguments is imported above.
    """

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def A_(examples) -> dict:
    """Collate function: stack each example's `pixel_values` into one batch tensor."""
    # Original bound the stack to an obfuscated name and returned undefined `pixel_values`.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    """Entry point for ViT-MAE pre-training: parse args, load data, build model, train/eval.

    Renamed from the obfuscated `A_` to `main`, the name the `__main__` guard and
    `_mp_fn` below call. All locals restored to the names the function body itself
    references (the obfuscation bound everything to one shared name).
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the MAE augmentation pipeline to every image in the batch."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    def collate_fn(examples):
        """Stack per-example pixel_values into one batch tensor (local so this
        function is self-contained; mirrors the module-level collator above)."""
        pixel_values = torch.stack([example["pixel_values"] for example in examples])
        return {"pixel_values": pixel_values}

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def A_(_lowerCAmelCase):
    """Entry point used by xla_spawn (TPUs); the worker-index argument is ignored."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 720
|
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact GELU via the error function: x * 0.5 * (1 + erf(x / sqrt(2))).

    Renamed from the obfuscated `A_` to `_gelu`, the name referenced by gelu_aa
    and the version fallback below; the original bound locals to a shared name
    and then read undefined `x`/`cdf`.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    """Tanh-approximated GELU (the 'new' variant): 0.5x(1 + tanh(sqrt(2/pi)(x + 0.044715 x^3))).

    Renamed to `_gelu_new`, the name the version fallback below references.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    """Mish activation: x * tanh(softplus(x)). Name restored from the ACTaFN mapping below."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    """Fast GELU approximation using precomputed sqrt(2/pi) ~= 0.7978845608.

    Name restored from the ACTaFN mapping below; the two distinct coefficients
    had been collapsed to one obfuscated name.
    """
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    """Quick GELU: x * sigmoid(1.702 * x). Name restored from the ACTaFN mapping below."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_aa(x):
    """GELU with outputs clipped to [-10, 10] (upstream 'gelu_10').

    Name restored from the `"gelu_10": gelu_aa` entry in the ACTaFN mapping below.
    """
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    """Gated Linear Unit: split x in two halves along `axis`, return a * sigmoid(b).

    The original gated with the whole input instead of the second half; the split
    result was bound but never used.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
# TF >= 2.4 ships a native (optionally tanh-approximated) keras gelu; otherwise
# fall back to the local implementations above. The original never bound the
# public `gelu`/`gelu_new` names the ACTaFN mapping below relies on.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        """Keras gelu with the tanh approximation enabled (matches _gelu_new)."""
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
# Registry mapping activation-name strings to the TF callables above; the
# lookup helper below reads from this table.
ACTaFN = {
    """gelu""": gelu,
    """gelu_10""": gelu_aa,
    """gelu_fast""": gelu_fast,
    """gelu_new""": gelu_new,
    """glu""": glu,
    """mish""": mish,
    """quick_gelu""": quick_gelu,
    """relu""": tf.keras.activations.relu,
    """sigmoid""": tf.keras.activations.sigmoid,
    """silu""": tf.keras.activations.swish,
    """swish""": tf.keras.activations.swish,
    """tanh""": tf.keras.activations.tanh,
}
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Return the activation callable registered under the given name.

    Raises:
        KeyError: if the name is not present in the ACTaFN registry.
    """
    activation_string = _lowerCAmelCase
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
| 38
| 0
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def A_ ( _lowerCAmelCase ) -> Tuple:
UpperCamelCase : Dict = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_lowerCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
UpperCamelCase : str = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
UpperCamelCase : List[str] = [[0.0, 0.0], [0.0, 0.0]]
UpperCamelCase , UpperCamelCase : Union[str, Any] = matrix[1][1], matrix[0][0]
UpperCamelCase , UpperCamelCase : Optional[Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_lowerCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_lowerCamelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
UpperCamelCase : str = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
UpperCamelCase : List[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
UpperCamelCase : Union[str, Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
UpperCamelCase : List[str] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
UpperCamelCase : Any = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
UpperCamelCase : str = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
UpperCamelCase : List[str] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
UpperCamelCase : str = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
UpperCamelCase : Union[str, Any] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
UpperCamelCase : Tuple = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
UpperCamelCase : int = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
UpperCamelCase : Optional[Any] = array(_lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
UpperCamelCase : Union[str, Any] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
UpperCamelCase : Optional[int] = array(_lowerCamelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_lowerCamelCase )
# Calculate the inverse of the matrix
return [[float(d(_lowerCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make diffusers execution deterministic so slice comparisons are reproducible.
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for the Kandinsky 2.2 decoder pipeline using tiny dummy models."""

    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """Tiny UNet conditioned on image embeddings."""
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        """Configuration for a tiny VQ decoder."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        """Assemble the pipeline components (unet, scheduler, movq)."""
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        return {
            "unet": self.dummy_unet,
            "scheduler": scheduler,
            "movq": self.dummy_movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic dummy call kwargs for the pipeline."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # mps does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    def test_kandinsky(self):
        """Full dummy forward pass: output shape and a pinned pixel slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration test against a reference image from the Hub."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        image_embeds , negative_image_embeds = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        # Re-seed so the decoder run is reproducible independently of the prior.
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_embeds , negative_image_embeds=negative_image_embeds , generator=generator , num_inference_steps=100 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 38
| 0
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class A__ ( ModelMixin , ConfigMixin ):
    """FiLM-conditioned T5-style decoder that denoises continuous spectrogram frames."""

    @register_to_config
    def __init__( self , input_dims = 128 , targets_length = 256 , max_decoder_noise_time = 2000.0 , d_model = 768 , num_layers = 12 , num_heads = 12 , d_kv = 64 , d_ff = 2048 , dropout_rate = 0.1 , ):
        super().__init__()
        # MLP producing the FiLM conditioning vector from the time embedding.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        # Position table is fixed, not learned.
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )

    def encoder_decoder_mask( self , query_input , key_input ):
        """Combine two 0/1 masks into a broadcastable attention mask."""
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )

    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        hidden = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            hidden = lyr(
                hidden , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        hidden = self.decoder_norm(hidden )
        hidden = self.post_dropout(hidden )
        spec_out = self.spec_out(hidden )
        return spec_out
class A__ ( nn.Module ):
    """T5 decoder layer: conditional self-attention, cross-attention, FiLM feed-forward."""

    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1e-6 ):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )

    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ):
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            # Turn the 0/1 mask into large negative bias for masked positions.
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class A__ ( nn.Module ):
    """Self-attention sublayer with optional FiLM conditioning applied after layer norm."""

    def __init__( self , d_model , d_kv , num_heads , dropout_rate ):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        # Residual connection around the attention output.
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class A__ ( nn.Module ):
    """Cross-attention sublayer attending over encoder key/value states."""

    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        # Residual connection around the attention output.
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class A__ ( nn.Module ):
    """Feed-forward sublayer with optional FiLM conditioning applied after layer norm."""

    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , conditioning_emb=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        # Residual connection around the feed-forward output.
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class A__ ( nn.Module ):
    """T5 gated feed-forward: GELU(wi_0(x)) * wi_1(x), then dropout and wo."""

    def __init__( self , d_model , d_ff , dropout_rate ):
        super().__init__()
        self.wi_a = nn.Linear(d_model , d_ff , bias=False )   # gated branch
        self.wi_a_a = nn.Linear(d_model , d_ff , bias=False )  # linear branch
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()

    def forward( self , hidden_states ):
        hidden_gelu = self.act(self.wi_a(hidden_states ) )
        hidden_linear = self.wi_a_a(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class A__ ( nn.Module ):
    """T5-style RMS layer norm: scale only, no mean subtraction, no bias."""

    def __init__( self , hidden_size , eps=1e-6 ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps

    def forward( self , hidden_states ):
        # Accumulate variance in float32 for stability regardless of input dtype.
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class A__ ( nn.Module ):
    """Tanh-approximated GELU matching the Google BERT / GPT implementation."""

    def forward( self , input ):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(input , 3.0 )) ))
class A__ ( nn.Module ):
    """FiLM layer: projects a conditioning embedding into per-feature scale and shift."""

    def __init__( self , in_features , out_features ):
        super().__init__()
        # Produces 2 * out_features values: scale then shift.
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )

    def forward( self , x , conditioning_emb ):
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
| 700
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A_ ( ) -> Dict:
UpperCamelCase : Tuple = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=_lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=_lowerCAmelCase , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=_lowerCAmelCase )
return parser.parse_args()
def A_ ( ) -> Optional[int]:
    """Launcher entry point: import the training script as a module and
    spawn one process per requested TPU core.

    NOTE(review): relies on a `parse_args` helper; in this file the parser
    function is bound to `A_` — confirm the intended name binding.
    """
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees its own CLI arguments.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
# Standard script entry point for the TPU launcher.
if __name__ == "__main__":
    main()
| 38
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exposed when torch is installed.
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38
| 0
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
# Module-level logger following the transformers logging convention.
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( VideoMAEImageProcessor ):
    """Deprecated alias of `VideoMAEImageProcessor`, kept for backward compatibility."""

    def __init__( self , *args , **kwargs ):
        # Emit a deprecation warning, then defer entirely to the new image processor.
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 702
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class A__ ( unittest.TestCase ):
    """Holds the dummy configuration used by the Vivit image-processor tests."""

    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ):
        # Fall back to the default resize/crop geometry when not provided.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict( self ):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for VivitImageProcessor over PIL, numpy and torch video inputs."""

    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        # Overrides passed to from_dict must win over the serialized values.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def _check_encoded_shape(self, encoded_videos, batch_size):
        # Helper asserting the (batch, frames, channels, crop_h, crop_w) layout.
        self.assertEqual(
            encoded_videos.shape,
            (
                batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self._check_encoded_shape(encoded_videos, 1)
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self._check_encoded_shape(encoded_videos, self.image_processor_tester.batch_size)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self._check_encoded_shape(encoded_videos, 1)
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self._check_encoded_shape(encoded_videos, self.image_processor_tester.batch_size)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self._check_encoded_shape(encoded_videos, 1)
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self._check_encoded_shape(encoded_videos, self.image_processor_tester.batch_size)
| 38
| 0
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
# Root of the transformers source tree, used by direct_transformers_import below.
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    # NOTE(review): original read "MODEL_FOR_FOR_VISION_2_SEQ..." (double FOR); the hasattr guard in the
    # update loop would silently skip image-to-text with that name — confirm against modeling_auto.
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def A_ ( _lowerCAmelCase ) -> list:
    """Split a camel-cased name into its words.

    e.g. ``"TFBertModel" -> ["TF", "Bert", "Model"]``. The lookaround regex splits
    before each lower->upper transition and before an upper followed by upper+lower.
    """
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , _lowerCAmelCase )
    return [m.group(0 ) for m in matches]
def A_ ( ) -> "pd.DataFrame":
    """Build the frameworks table: one row per model type with pt/tf/flax support flags and processor class."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    # Map a model class prefix (e.g. "Bert") to its model type (e.g. "bert").
    model_prefix_to_model_type = {
        config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def A_ ( _lowerCAmelCase ) -> dict:
    """Update ``table`` (model class name -> (pipeline_tag, auto_class)) from the auto mappings.

    Walks PIPELINE_TAGS_AND_AUTO_MODELS across the PT/TF/Flax auto modules.
    """
    table = _lowerCAmelCase
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def A_ ( token , commit_sha ) -> None:
    """Regenerate frameworks.json and pipeline_tags.json and push them to the transformers-metadata dataset.

    Args:
        token: HF Hub token used for download/upload.
        commit_sha: sha of the transformers commit triggering the update (used in the commit message), or None.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , "frameworks.json" ) )
        tags_dataset.to_json(os.path.join(tmp_dir , "pipeline_tags.json" ) )
        if commit_sha is not None:
            commit_message = (
                F"""Update with commit {commit_sha}\n\nSee: """
                F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata" , folder_path=tmp_dir , repo_type="dataset" , token=token , commit_message=commit_message , )
def A_ ( ) -> None:
    """Check every pipeline task is covered by PIPELINE_TAGS_AND_AUTO_MODELS; raise ValueError otherwise."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            # A task is fine if its default PT model class is referenced under another tag.
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = ", ".join(missing )
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
    # CLI entry point: either verify pipeline coverage or push refreshed metadata.
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    # NOTE(review): in this dump the two entry functions are both named A_; calls below use
    # their original names (check_pipeline_tags / update_metadata) — confirm definitions match.
    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 703
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# Module logger (transformers' logging wrapper, imported above).
logger = logging.get_logger(__name__)

# File names the fast tokenizer expects; read by the tokenizer class below.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input sizes (positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class A__ ( __snake_case ):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE tokenizer.

    Restores the class attributes and parameter names that the mangled dump collapsed
    (duplicate ``A_`` parameters were a SyntaxError; all class attrs shared one name).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self ,
        vocab_file=None ,
        merges_file=None ,
        unk_token="<|endoftext|>" ,
        bos_token="<|endoftext|>" ,
        eos_token="<|endoftext|>" ,
        add_prefix_space=False ,
        trim_offsets=True ,
        **kwargs ,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) ,
            bos_token=bos_token ,
            eos_token=eos_token ,
            unk_token=unk_token ,
            **kwargs ,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """Wrap sequences with BOS/EOS: ``<bos> A <eos>`` or ``<bos> A <eos> <eos> B <eos>``."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """Return all-zero token type ids (BlenderbotSmall does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 38
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def A_ ( tok , src_examples , tgt_examples , max_tokens=1024 ):
    """Greedily pack (src, tgt) example pairs so each packed pair stays under max_tokens.

    Args:
        tok: tokenizer callable returning ``.input_ids`` (used only to measure length).
        src_examples / tgt_examples: parallel lists of strings.
        max_tokens: per-side token budget for a packed example.

    Returns:
        (finished_src, finished_tgt): the packed parallel lists.
    """
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang ):
        # Token length check on one candidate string.
        return tok(strang , return_tensors="pt" ).input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup: flush the last accumulated pair
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def A_ ( tok , data_dir , max_tokens , save_path ) -> None:
    """Pack the train split of a seq2seq data dir and copy val/test through unchanged.

    NOTE(review): relies on a module-level ``pack_examples`` (defined above as ``A_``
    in this dump) — confirm the name resolves.
    """
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path, tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / F"""{split}.source""" ).open("w" ).write("\n".join(packed_src ) )
        Path(save_path / F"""{split}.target""" ).open("w" ).write("\n".join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        shutil.copyfile(src_path , save_path / F"""{split}.source""" )
        shutil.copyfile(tgt_path , save_path / F"""{split}.target""" )
def A_ ( ) -> None:
    """CLI: parse args, load the tokenizer and pack the data directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." )
    parser.add_argument("--max_seq_len" , type=int , default=128 )
    parser.add_argument("--data_dir" , type=str )
    parser.add_argument("--save_path" , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    # NOTE(review): calls module-level pack_data_dir (defined above as A_ in this dump) — confirm.
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )


if __name__ == "__main__":
    packer_cli()
| 704
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for ConvBERT; _LazyModule below consumes this mapping.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy module so heavy backends load only on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38
| 0
|
def A_ ( _lowerCAmelCase ) -> List[Any]:
if n == 1 or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return 0
elif n == 2:
return 1
else:
UpperCamelCase : List[str] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def A_ ( _lowerCAmelCase ) -> int:
    """Return the index of the first Fibonacci number with at least n digits.

    NOTE(review): depends on the module-level ``fibonacci`` helper (defined above as
    ``A_`` in this dump) — confirm the name resolves at runtime.
    """
    n = _lowerCAmelCase
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def A_ ( _lowerCAmelCase = 1000 ) -> int:
    """Project Euler-style solution: index of the first Fibonacci number with n digits."""
    # NOTE(review): calls the digits-index helper above (named A_ in this dump) — confirm name.
    return fibonacci_digits_index(_lowerCAmelCase )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 705
|
import logging
import os
import threading
import time
# Optional dependencies: each falls back to None so the platform check at the
# bottom of the module can pick the right lock implementation.
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

# Lazily-created module logger, see logger() below.
_logger = None
def A_ ( ):
    """Return the module logger, creating and caching it in ``_logger`` on first use."""
    global _logger
    # The original never wrote the created logger back, so every call re-read None.
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class A__ ( __snake_case ):
    """Raised when a file lock could not be acquired within the timeout."""

    def __init__( self , lock_file ):
        # Path of the lock file that could not be acquired (read by __str__).
        self.lock_file = lock_file
        return None

    def __str__( self ):
        temp = F"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class A__ :
    """Context-manager proxy returned by ``acquire()``: entering yields the lock,
    exiting releases it exactly once."""

    def __init__( self , lock ):
        # The lock object to proxy; released on __exit__.
        self.lock = lock
        return None

    def __enter__( self ):
        return self.lock

    def __exit__( self , exc_type , exc_value , traceback ):
        self.lock.release()
        return None
class A__ :
    """Base class for platform file locks: re-entrant per object, thread-safe counter.

    Subclasses implement _acquire()/_release(). Restores the ``self._*`` attribute
    assignments and the ``timeout`` property name that the mangled dump destroyed
    (``@timeout.setter`` requires a property literally named ``timeout``).
    """

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function. This file lock is only NOT None, if the object
        # currently holds the lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism: released only when it drops back to 0.
        self._lock_counter = 0
        return None

    @property
    def lock_file( self ):
        """Path of the lock file."""
        return self._lock_file

    @property
    def timeout( self ):
        """Default acquire timeout in seconds (-1 means wait forever)."""
        return self._timeout

    @timeout.setter
    def timeout( self , value ):
        self._timeout = float(value )
        return None

    def _acquire( self ):
        raise NotImplementedError()

    def _release( self ):
        raise NotImplementedError()

    @property
    def is_locked( self ):
        """True while this object holds the OS-level lock."""
        return self._lock_file_fd is not None

    def acquire( self , timeout=None , poll_intervall=0.05 ):
        """Acquire the lock, polling every *poll_intervall* seconds; raise Timeout on expiry."""
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )

    def release( self , force=False ):
        """Decrement the nesting counter; release the OS lock when it reaches 0 (or *force*)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
        return None

    def __enter__( self ):
        self.acquire()
        return self

    def __exit__( self , exc_type , exc_value , traceback ):
        self.release()
        return None

    def __del__( self ):
        # Force-release so a garbage-collected lock never stays held.
        self.release(force=True )
        return None

    def hash_filename_if_too_long( self , path , max_length ):
        """Shorten *path*'s basename with a hash suffix if it exceeds *max_length* chars."""
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname , new_filename )
        else:
            return path
class A__ ( __snake_case ):
    """Windows file lock based on ``msvcrt.locking``."""

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        # The \\?\ prefix lifts the Windows MAX_PATH limitation.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )

    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None

    def _release( self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class A__ ( __snake_case ):
    """Unix file lock based on ``fcntl.flock``."""

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        # Cap the filename length at the filesystem's actual limit.
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )

    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        # Clear the fd before unlocking so is_locked turns False first.
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class A__ ( __snake_case ):
    """Soft file lock: existence of the lock file itself is the lock (O_EXCL)."""

    def _acquire( self ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Pick the platform-appropriate lock implementation and export it as FileLock.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 38
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for MRA; _LazyModule below consumes this mapping.
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy module so torch loads only on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 706
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A_ (
    model_type ,
    generator_name_or_path ,
    question_encoder_name_or_path ,
    dest_dir ,
    config_name_or_path = None ,
    generator_tokenizer_name_or_path = None ,
    question_encoder_tokenizer_name_or_path = None ,
) -> None:
    """Consolidate a generator + question encoder into a single RAG checkpoint at *dest_dir*.

    The mangled original declared seven parameters all named ``_lowerCAmelCase`` (a
    SyntaxError); names restored from the body's references and the argparse help below.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
    # CLI wrapper around the consolidation routine above.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    # NOTE(review): the consolidation function is named A_ in this dump; call uses its original name.
    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 38
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
# Generic element type used by the linked-list node and stack below.
T = TypeVar("T")
class Node(Generic[T]):
    """One element of the singly linked list that backs the stack.

    Renamed from the obfuscated ``A__``: the stack's ``push`` refers to
    ``Node``, and the original never stored its argument on ``self``.
    """

    def __init__(self, data: T) -> None:
        # Payload carried by this node.
        self.data = data
        # Link to the node beneath this one on the stack (None = bottom).
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"
class A__(Generic[T]):
    """LIFO stack implemented with a singly linked list of ``Node`` objects.

    Restored from the obfuscated original: attribute assignments were lost
    (values bound to throwaway locals) and every method was named
    ``__UpperCamelCase``, so only the last definition survived.
    """

    def __init__(self) -> None:
        # Top of the stack; None when the stack is empty.
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        """Yield items from top to bottom."""
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Place *item* on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top item without removing it; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every item from the stack."""
        self.top = None
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 707
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds small ConvBERT configs/inputs and checks each TF head's output shapes.

    Restored from the obfuscated original: duplicate ``A_`` parameter names
    (a SyntaxError), ``self.`` assignments lost, and all methods collapsed to
    ``__UpperCamelCase``. Renamed from ``A__`` because the test class below
    instantiates ``TFConvBertModelTester``. NOTE(review): as in the upstream
    file, ``__init__`` hardcodes its values and ignores the keyword arguments.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a tiny ConvBertConfig plus random ids/masks/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        # Exercise both dict and list calling conventions.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Tile every tensor so each example carries num_choices candidates.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A__(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model-test suite specialized for ConvBERT.

    Restored from the obfuscated original: bases were ``__snake_case``,
    method names collapsed to ``__UpperCamelCase``, and locals were bound to
    throwaway names.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Save each model as a TF SavedModel and check the reloaded outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                # ConvBERT halves the attention heads via its head ratio.
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Check attention tensors are exposed via kwargs and via the config."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class A__(unittest.TestCase):
    """Integration test against the published YituTech/conv-bert-base weights."""

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Reference values taken from a known-good run of the checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 38
| 0
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__lowerCamelCase : Tuple = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__lowerCamelCase : int = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__lowerCamelCase : Dict = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A__(datasets.Metric):
    """Accuracy on the MATH dataset after LaTeX canonicalization.

    Restored method names ``_info``/``_compute`` — the ``datasets.Metric``
    API dispatches to those names.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return the fraction of predictions equivalent to their reference."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            # is_equiv canonicalizes both strings before comparing.
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 708
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL for CamemBERT-family models.
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class A__(PretrainedConfig):
    """Configuration for CamemBERT models.

    Restored from the obfuscated original: the base class was the undefined
    ``__snake_case`` (``PretrainedConfig`` is imported above), parameter names
    were duplicated ``A_`` placeholders, and values were never stored on
    ``self``.
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__(OnnxConfig):
    """ONNX export configuration for CamemBERT (restored ``inputs`` property)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 38
| 0
|
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including *num* (sieve of Eratosthenes).

    Renamed from the obfuscated ``A_`` — the ``__main__`` guard below calls
    ``prime_sieve``. Raises ValueError for non-positive input.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        msg = f"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    # Everything above sqrt(num) still marked True is prime.
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
    # Demo: prompt for an integer and print every prime up to it.
    print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 709
|
def nor_gate(input_a: int, input_b: int) -> int:
    """Return 1 when both inputs are 0, else 0 (logical NOR).

    Renamed from the obfuscated ``A_`` — the truth-table printer below
    calls ``nor_gate``.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    return int(input_a == input_b == 0)
def main() -> None:
    """Print the full truth table of the NOR gate.

    Renamed from the obfuscated ``A_`` — the ``__main__`` guard calls
    ``main()``.
    """
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"""| 0 | 0 | {nor_gate(0, 0)} |""")
    print(f"""| 0 | 1 | {nor_gate(0, 1)} |""")
    print(f"""| 1 | 0 | {nor_gate(1, 0)} |""")
    print(f"""| 1 | 1 | {nor_gate(1, 1)} |""")
if __name__ == "__main__":
    # Verify the doctest examples, then print the NOR truth table.
    import doctest
    doctest.testmod()
    main()
| 38
| 0
|
'''simple docstring'''
def check_bouncy(n: int) -> bool:
    """Return True when *n*'s digits are neither increasing nor decreasing.

    Renamed from the obfuscated ``A_`` — ``solution`` below calls
    ``check_bouncy``. The original type check was ``isinstance(x, x)``,
    which is always true; it now checks for ``int``.

    >>> check_bouncy(6789)
    False
    >>> check_bouncy(155349)
    True
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    # Bouncy = not sorted ascending and not sorted descending.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent: float = 99) -> int:
    """Return the least number for which *percent*% of 1..n are bouncy.

    Project Euler 112. Renamed from the obfuscated ``A_`` — the
    ``__main__`` guard calls ``solution(99)``.

    >>> solution(50)
    538
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        # Stop once the running proportion of bouncy numbers reaches the target.
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
    # Run the doctests, then print the Project Euler 112 answer for 99%.
    from doctest import testmod
    testmod()
    print(f"""{solution(99)}""")
| 710
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__(ProcessorMixin):
    """Wraps a Blip image processor and a tokenizer into a single processor.

    Restored from the obfuscated original: the base was the undefined
    ``__snake_case`` (``ProcessorMixin`` is imported above), ``__call__`` had
    duplicate ``A_`` parameter names, and assignments were bound to
    throwaway locals.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        # The tokenizer must not emit token_type_ids for this model family.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and/or preprocess ``images``; at least one is required."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 38
| 0
|
'''simple docstring'''
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__lowerCamelCase : Optional[Any] = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class A__ ( __snake_case ):
    """Agent tool that translates text between NLLB-200 languages.

    Language names are given in plain English (e.g. 'Romanian'), mapped to NLLB
    codes via ``LANGUAGE_CODES`` and fed to a seq2seq translation model.

    NOTE(review): the original assigned every class attribute to one mangled
    name and defined all three methods under one mangled name, so only the last
    of each survived; canonical names are restored (``lang_to_code`` is the name
    the method bodies actually read).
    """

    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode(self, text, src_lang, tgt_lang):
        """Validate both languages and build tokenized translation inputs.

        Raises:
            ValueError: if either language name is not in ``lang_to_code``.
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(f"""{src_lang} is not a supported language.""")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"""{tgt_lang} is not a supported language.""")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang )

    def forward(self, inputs):
        """Run generation with the underlying seq2seq model."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 711
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( __snake_case ):
    """Feature extractor that converts raw audio into padded log-mel
    spectrograms (``audio_values``) plus a patch-level ``audio_mask``.

    NOTE(review): the original declared every ``__init__``/``__call__``
    parameter under the single mangled name ``A_`` (a SyntaxError) and
    assigned attributes to throwaway locals; names below are recovered from
    their uses inside the class (e.g. ``self.freq_len``, ``self.hop_length``).
    ``np.floataa`` (nonexistent) is corrected to ``np.float32``.
    """

    model_input_names = ['audio_values', 'audio_mask']

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length  # max number of time frames kept
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]  # patches along the mel axis
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute one normalized log-mel spectrogram (time x mel bins)."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]  # drop the last (incomplete) frame
        log_spec = log_spec - 20.0
        # squash dB values into roughly [-1, 1]
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of mono waveforms.

        Raises:
            ValueError: if ``sampling_rate`` mismatches the extractor's rate,
                or if multi-channel audio is passed.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 38
| 0
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : str = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    """Build an ``ASTConfig`` matching the named original checkpoint.

    NOTE(review): restored the name the caller below already uses; the
    original body referenced an undefined ``lowerCamelCase_`` and discarded
    the config attribute assignments.

    Raises:
        ValueError: for an unrecognized model name.
    """
    config = ASTConfig()
    if "10-10" in model_name:
        pass  # defaults already match the 10-10 models
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported" )
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    # label mappings live in a shared dataset repo on the Hub
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def rename_key(name):
    """Map one original AST checkpoint key to its HF Transformers equivalent.

    NOTE(review): the original bound the parameter to a mangled name and
    discarded every ``replace`` result; the chained rewrites are restored.
    """
    if "module.v" in name:
        name = name.replace("module.v" , "audio_spectrogram_transformer" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "dist_token" in name:
        name = name.replace("dist_token" , "embeddings.distillation_token" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0" , "classifier.layernorm" )
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1" , "classifier.dense" )
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original AST state dict in place into HF layout.

    Fused ``qkv`` tensors are split into separate query/key/value entries of
    size ``config.hidden_size``; other keys are re-inserted unchanged.

    NOTE(review): the original popped an undefined name and discarded every
    split tensor; the key template below follows the upstream conversion
    script — confirm against the current transformers source.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            # NOTE(review): upstream applies a key-renaming helper here; the
            # helper's name is mangled in this file, so keys pass through
            # unchanged for now — restore once names are fixed.
            orig_state_dict[key] = val
    return orig_state_dict
def remove_keys(state_dict):
    """Drop the original classification-head keys from ``state_dict`` in place.

    Missing keys are ignored (``pop`` with a ``None`` default), so the
    function is safe for checkpoints without a distillation head.
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original AST checkpoint, convert it to HF format, verify
    the logits on a sample input and optionally save / push the result.

    NOTE(review): restored the name the ``__main__`` block already calls; the
    original declared all three parameters under one mangled name
    (a SyntaxError) and discarded most intermediate bindings.

    Raises:
        ValueError: if the model name is unknown or verification fails.
    """
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2_677_393 if "speech-commands" not in model_name else -6.845_978
    std = 4.5_689_974 if "speech-commands" not in model_name else 5.5_654_526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )
        waveform, _ = torchaudio.load(filepath)  # torchaudio.load returns (tensor, sample_rate)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8_760, -7.0_042, -8.6_602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1_986, -7.0_903, -8.2_718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6_128, -8.0_080, -9.4_344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5_080, -7.4_534, -8.8_917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5_050, -6.5_833, -8.0_843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3_826, -7.0_336, -8.2_413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2_113, -6.9_101, -8.3_470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1_589, -8.0_566, -8.7_984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving feature extractor to {pytorch_dump_folder_path}""")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"""MIT/{model_name}""")
        feature_extractor.push_to_hub(f"""MIT/{model_name}""")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # NOTE(review): the original assigned the parser and parsed args to
    # throwaway names, leaving `parser` and `args` undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 712
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar("""KT""")
__lowerCamelCase : Dict = TypeVar("""VT""")
class Node ( Generic[KT, VT] ):
    """One skip-list element: a key/value pair plus per-level forward links.

    NOTE(review): renamed from the mangled ``A__`` to ``Node``, the name every
    call site in this file uses; the original ``__init__`` also declared both
    parameters under one mangled name (a SyntaxError), and the ``level``
    property carried a mangled (name-mangled!) identifier while callers read
    ``node.level``.
    """

    def __init__(self, key = "root", value = None):
        self.key = key
        self.value = value
        # forward[i] is the successor at level i; the list length is the level.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self):
        return f"""Node({self.key}: {self.value})"""

    @property
    def level(self):
        """Number of levels this node participates in."""
        return len(self.forward)
class SkipList ( Generic[KT, VT] ):
    """Probabilistic sorted key/value map (Pugh's skip list).

    NOTE(review): renamed from the mangled ``A__`` to ``SkipList`` (the name
    every visible call site uses) and the methods from one shared mangled name
    to ``random_level``/``_locate_node``/``delete``/``insert``/``find`` — the
    names the bodies themselves call. ``insert`` also had two parameters under
    one mangled name (a SyntaxError).
    """

    def __init__(self, p = 0.5, max_level = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0            # number of levels currently in use
        self.p = p                # promotion probability per level
        self.max_level = max_level

    def __str__(self):
        """Render an ASCII diagram of the list (debugging aid)."""
        items = list(self)
        if len(items) == 0:
            return f"""SkipList(level={self.level})"""
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"""[{node.key}]""".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"""[{node.key}]""".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards))
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"""SkipList(level={self.level})\n""" + "\n".join(lines)

    def __iter__(self):
        """Yield keys in ascending order by walking level-0 links."""
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self):
        """Draw a level in [1, max_level] with geometric distribution ``p``."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return ``(node_with_key_or_None, per-level predecessors of key)``."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            # or equal to searched key would result in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially
            # have to be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        # references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        # if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key):
        """Remove ``key`` if present, repairing forward links at every level."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key, value):
        """Insert ``key -> value``; an existing key has its value overwritten."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key):
        """Return the value stored under ``key``, or ``None`` if absent."""
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert() -> None:
    """Inserted keys are reachable along level-0 links with their values.

    NOTE(review): renamed to the name the test runner below already calls;
    the original asserted on an undefined mangled name.
    """
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value() -> None:
    """Re-inserting an existing key overwrites its value instead of duplicating.

    NOTE(review): renamed to the name the test runner below already calls;
    a stray debug ``print()`` was dropped.
    """
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none() -> None:
    """Looking up any key in an empty list yields None."""
    skip_list = SkipList()
    assert skip_list.find("Some key") is None
def test_search() -> None:
    """find() returns the latest value per key and None for absent keys."""
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20
    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)
    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing() -> None:
    """Deleting from an empty list is a no-op and leaves no forward links."""
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method() -> None:
    """Deleted keys are no longer returned by find()."""
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key() -> None:
    """Successive deletions remove exactly the requested key each time."""
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes() -> None:
    """After a delete, no unreachable node is still referenced at any level.

    NOTE(review): the original inner generator bound its parameter to a
    mangled name and recursed on an undefined one; fixed to ``node``.
    """
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        # depth-first walk over every forward reference, including duplicates
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values() -> None:
    """Iteration stays sorted through inserts and deletes."""

    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests() -> None:
    """Run every skip-list test repeatedly.

    The structure is probabilistic (random level promotion), so each test is
    repeated to make rare layouts likely to appear.
    """
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def main() -> None:
    """Small demo: build a list, delete a key, and pretty-print the structure.

    NOTE(review): renamed to ``main`` — the name the ``__main__`` guard below
    already calls; the original printed an undefined mangled name.
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
    # Run the module doctests first, then the interactive demo.
    import doctest

    doctest.testmod()
    main()
| 38
| 0
|
def cocktail_shaker_sort(unsorted):
    """Sort a mutable sequence in place with cocktail shaker sort; return it.

    A bidirectional bubble sort: each outer pass bubbles the largest remaining
    element right and the smallest left, stopping early once no swap occurs.

    NOTE(review): restored the name the ``__main__`` block already calls; the
    original body referenced an undefined mangled name and discarded every swap.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([])
    []
    >>> cocktail_shaker_sort([-4, 0, -3])
    [-4, -3, 0]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: push the minimum of unsorted[0..i] toward the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # forward pass: push the maximum of unsorted[0..i] toward position i
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break  # already sorted
    return unsorted
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original assigned the prompt result to a throwaway
    # name and then read undefined `user_input`/`unsorted`; the interactive
    # prompt is also kept inside the guard so importing the module never blocks.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 713
|
from PIL import Image
def mean_threshold(image):
    """Binarize a grayscale image in place around its mean pixel value.

    Pixels strictly above the mean become 255, the rest 0; the (mutated)
    image object is returned. Assumes a single-channel (mode "L") PIL image
    whose pixel access object supports ``pixels[x, y]`` — TODO confirm.

    NOTE(review): restored the name the ``__main__`` block already calls; the
    original read/wrote undefined mangled names, so neither the mean nor the
    thresholded pixels were ever computed.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    # accumulate the global mean (integer division below)
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # threshold every pixel against the mean
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Load as grayscale ("L"), binarize around the mean, and save the result.
    # NOTE(review): the original assigned the result to a throwaway name and
    # then called .save() on undefined `image`.
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 38
| 0
|
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCamelCase : str = logging.get_logger(__name__)
logging.set_verbosity_info()
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
    """Convert an old-structure ProphetNet/XLMProphetNet checkpoint to the
    current layout and save it.

    NOTE(review): this function is heavily name-mangled and cannot run as-is:
    the two parameters are bound to `_lowerCAmelCase` but the body reads
    `prophetnet_checkpoint_path` / `pytorch_dump_folder_path`; most results
    below are assigned to throwaway `UpperCamelCase` locals while later code
    reads `prophet`, `prophet_old`, `loading_info`, `attributes`, `mapping`,
    `special_keys`, `model`, `old_model`, `old_attribute`, `is_key_init`,
    `embed_dim` and `param` — all undefined; and `lowerCamelCase__` is
    undefined wherever it appears. The `__main__` block also calls this as
    `convert_prophetnet_checkpoint_to_pytorch`. Restore against the upstream
    conversion script before use.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        UpperCamelCase : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
        UpperCamelCase : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
            lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
    else:
        UpperCamelCase : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
        UpperCamelCase : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
            lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
    # projections whose fused in_proj weights must be split per attribute
    UpperCamelCase : int = ["key_proj", "value_proj", "query_proj"]
    # old-structure attribute name for each new-structure attribute
    UpperCamelCase : str = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    # every key the new model could not load must be copied from the old model
    for key in loading_info["missing_keys"]:
        UpperCamelCase : Union[str, Any] = key.split("." )
        if attributes[0] == "lm_head":
            UpperCamelCase : Tuple = prophet
            UpperCamelCase : Tuple = prophet_old
        else:
            UpperCamelCase : Tuple = prophet.prophetnet
            UpperCamelCase : List[str] = prophet_old.model
        UpperCamelCase : int = False
        # walk the dotted path on both models in lock-step, copying at leaves
        for attribute in attributes:
            if attribute in mapping:
                UpperCamelCase : int = mapping[attribute]
            if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
                UpperCamelCase : Dict = attribute
            elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
                UpperCamelCase : Optional[Any] = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                UpperCamelCase : Any = old_model.weight
                logger.info(F"""{attribute} is initialized.""" )
                UpperCamelCase : str = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                UpperCamelCase : Tuple = old_model.bias
                logger.info(F"""{attribute} is initialized""" )
                UpperCamelCase : str = True
                break
            elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ):
                # fused q/k/v projection: slice the matching third out of in_proj
                UpperCamelCase : str = old_model.in_proj_weight.shape[0] // 3
                UpperCamelCase : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
                # NOTE(review): the next two lines are bare comparisons, not
                # assertions — the shape checks silently do nothing.
                param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    UpperCamelCase : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    UpperCamelCase : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    UpperCamelCase : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    UpperCamelCase : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    UpperCamelCase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    UpperCamelCase : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                UpperCamelCase : Tuple = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                UpperCamelCase : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                UpperCamelCase : Union[str, Any] = True
                break
            if attribute.isdigit():
                # numeric path component: index into a module list
                UpperCamelCase : str = model[int(lowerCamelCase__ )]
                UpperCamelCase : Union[str, Any] = old_model[int(lowerCamelCase__ )]
            else:
                UpperCamelCase : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
                if old_attribute == "":
                    UpperCamelCase : str = old_model
                else:
                    if not hasattr(lowerCamelCase__ , lowerCamelCase__ ):
                        raise ValueError(F"""{old_model} does not have {old_attribute}""" )
                    UpperCamelCase : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
    if not is_key_init:
        raise ValueError(F"""{key} was not correctly initialized!""" )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
    # CLI entry point for the ProphetNet checkpoint conversion.
    # NOTE(review): the original assigned the parser and parsed args to
    # throwaway names, leaving `parser` and `args` undefined; the target
    # function is also name-mangled (defined as `A_` above) — restore its
    # name to `convert_prophetnet_checkpoint_to_pytorch`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 714
|
from math import loga
def A_ ( _lowerCAmelCase ) -> int:
    """Return the zero-based index of the lowest set bit of a non-negative integer.

    Returns 0 for input 0. Raises TypeError for non-int input and ValueError
    for negative input.
    """
    # Reject non-integers explicitly; the original's isinstance(x, x) call was
    # broken (arg 2 must be a type) and raised TypeError for the wrong reason.
    if not isinstance(_lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    if _lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    # (x & -x) isolates the lowest set bit; bit_length() - 1 gives its index.
    # Exact for arbitrarily large ints, unlike the previous float log2.
    return 0 if _lowerCAmelCase == 0 else (_lowerCAmelCase & -_lowerCAmelCase).bit_length() - 1
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 38
| 0
|
import unittest
import numpy as np
def A_ ( mat_a , mat_b , mat_c , pseudo_inv = None , ) -> np.ndarray:
    """Return the Schur complement S = C - B^T A^{-1} B of [[A, B], [B^T, C]].

    Args:
        mat_a: square block A.
        mat_b: block B, with as many rows as A.
        mat_c: block C, with as many columns as B.
        pseudo_inv: optional precomputed (pseudo-)inverse of A; computed with
            np.linalg.inv when omitted.

    Raises:
        ValueError: on shape mismatch or when A is singular.

    NOTE(review): the original signature reused one parameter name four times
    (invalid Python); names restored from the body's usage.
    """
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            F"""Instead found A of size {shape_a} and B of size {shape_b}"""
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            F"""Instead found B of size {shape_b} and C of size {shape_c}"""
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement." )
    return mat_c - mat_b.T @ a_inv @ mat_b
class A__ ( unittest.TestCase ):
    """Tests for the Schur-complement helper: det([[A,B],[B^T,C]]) == det(A)*det(S),
    and shape-mismatch inputs must raise.

    NOTE(review): a renaming pass collapsed all locals onto `UpperCamelCase` and
    left `lowercase_`, `a`, `b`, `c`, `det_a`, `det_s` and `schur_complement`
    unresolved — TODO restore distinct locals and the helper's real name.
    """
    def __UpperCamelCase( self ):
        """Determinant identity on an invertible 3x3 / 3x2 / 2x2 example."""
        UpperCamelCase : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        UpperCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
        UpperCamelCase : Tuple = np.array([[2, 1], [6, 3]] )
        UpperCamelCase : Tuple = schur_complement(lowercase_ , lowercase_ , lowercase_ )
        UpperCamelCase : Dict = np.block([[a, b], [b.T, c]] )
        UpperCamelCase : Union[str, Any] = np.linalg.det(lowercase_ )
        UpperCamelCase : Optional[int] = np.linalg.det(lowercase_ )
        UpperCamelCase : Dict = np.linalg.det(lowercase_ )
        self.assertAlmostEqual(lowercase_ , det_a * det_s )
    def __UpperCamelCase( self ):
        """Row-count mismatch between A and B must raise ValueError."""
        UpperCamelCase : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        UpperCamelCase : int = np.array([[0, 3], [3, 0], [2, 3]] )
        UpperCamelCase : Optional[Any] = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(lowercase_ ):
            schur_complement(lowercase_ , lowercase_ , lowercase_ )
    def __UpperCamelCase( self ):
        """Column-count mismatch between B and C must raise ValueError."""
        UpperCamelCase : Union[str, Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        UpperCamelCase : Any = np.array([[0, 3], [3, 0], [2, 3]] )
        UpperCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(lowercase_ ):
            schur_complement(lowercase_ , lowercase_ , lowercase_ )
# Run the tests (and any doctests) when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    unittest.main()
| 715
|
from __future__ import annotations
# NOTE(review): these lines were module metadata (__author__, __license__,
# __version__, __maintainer__, __email__, __status__) before the renaming pass
# collapsed them into repeated assignments to one throwaway name — TODO restore.
__lowerCamelCase : Optional[int] = """Muhammad Umer Farooq"""
__lowerCamelCase : Tuple = """MIT"""
__lowerCamelCase : Optional[int] = """1.0.0"""
__lowerCamelCase : int = """Muhammad Umer Farooq"""
__lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me"""
__lowerCamelCase : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class A__ ( __snake_case ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__()
UpperCamelCase : list[str] = []
UpperCamelCase : str = domain
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
UpperCamelCase : Any = parse.urljoin(self.domain , A_ )
self.urls.append(A_ )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the registrable domain (last two host labels) of *url*.

    The netloc extraction is inlined here because the helper the original
    referenced (`get_sub_domain_name`) is not defined under that name in this
    file after the renaming pass.
    """
    return ".".join(parse.urlparse(_lowerCAmelCase ).netloc.split("." )[-2:] )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the network location (host[:port]) component of *url*."""
    parsed = parse.urlparse(_lowerCAmelCase )
    return parsed.netloc
def A_ ( _lowerCAmelCase = "https://github.com" ) -> list[str]:
    """Crawl *url*, follow its anchors and return the sorted e-mail addresses found.

    NOTE(review): the renaming pass left many names unresolved here
    (`get_domain_name`, `Parser`, `parser`, `r`, `domain`, `read`, `emails`,
    `valid_emails`) — the assignments below all target a throwaway name, and
    `requests` exceptions are caught as ValueError. TODO restore before running.
    """
    UpperCamelCase : int = get_domain_name(_lowerCAmelCase )
    # Initialize the parser
    UpperCamelCase : str = Parser(_lowerCAmelCase )
    try:
        # Open URL
        UpperCamelCase : int = requests.get(_lowerCAmelCase )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        UpperCamelCase : Optional[Any] = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                UpperCamelCase : Optional[Any] = requests.get(_lowerCAmelCase )
                # Get the valid email.
                UpperCamelCase : Optional[int] = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(_lowerCAmelCase )
            # Best-effort: skip pages that fail to fetch or parse.
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(_lowerCAmelCase )
if __name__ == "__main__":
    # NOTE(review): `emails_from_url` and `emails` are unresolved — the crawler
    # above was renamed to `A_` and its result is bound to a throwaway name.
    __lowerCamelCase : Tuple = emails_from_url("""https://github.com""")
    print(f"""{len(emails)} emails found:""")
    print("""\n""".join(sorted(emails)))
| 38
| 0
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class A__ ( nn.Module ):
    # Flax down-block: `num_layers` ResNet blocks each followed by a
    # cross-attention transformer, with an optional 2x downsampler at the end.
    # NOTE(review): the dataclass field names were collapsed to `_UpperCAmelCase`
    # by a renaming pass; the bodies read self.in_channels / out_channels /
    # dropout / num_layers / num_attention_heads / add_downsample /
    # use_linear_projection / only_cross_attention /
    # use_memory_efficient_attention / dtype — TODO restore them. The
    # `lowerCAmelCase_` names in the bodies are likewise unresolved.
    _UpperCAmelCase :int
    _UpperCAmelCase :int
    _UpperCAmelCase :float = 0.0
    _UpperCAmelCase :int = 1
    _UpperCAmelCase :int = 1
    _UpperCAmelCase :bool = True
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :jnp.dtype = jnp.floataa
    def __UpperCamelCase( self ):
        """Build the per-layer resnet/attention submodules (flax setup)."""
        UpperCamelCase : str = []
        UpperCamelCase : List[str] = []
        for i in range(self.num_layers ):
            # First layer maps in_channels -> out_channels; later layers keep out_channels.
            UpperCamelCase : Optional[Any] = self.in_channels if i == 0 else self.out_channels
            UpperCamelCase : Union[str, Any] = FlaxResnetBlockaD(
                in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(lowerCAmelCase_ )
            UpperCamelCase : Tuple = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(lowerCAmelCase_ )
        UpperCamelCase : str = resnets
        UpperCamelCase : Optional[Any] = attentions
        if self.add_downsample:
            UpperCamelCase : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , A_ , A_ , A_ , A_=True ):
        """Apply resnet+attention pairs, collecting intermediate states for skips."""
        UpperCamelCase : Tuple = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            UpperCamelCase : Optional[Any] = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
            UpperCamelCase : Dict = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
            output_states += (hidden_states,)
        if self.add_downsample:
            UpperCamelCase : Tuple = self.downsamplers_a(lowerCAmelCase_ )
            output_states += (hidden_states,)
        return hidden_states, output_states
class A__ ( nn.Module ):
    # Flax down-block of plain ResNet blocks with an optional 2x downsampler.
    # NOTE(review): field names collapsed to `_UpperCAmelCase` by the renaming
    # pass; the bodies read self.in_channels / out_channels / dropout /
    # num_layers / add_downsample / dtype — TODO restore. `lowerCAmelCase_` in
    # the bodies is likewise unresolved.
    _UpperCAmelCase :int
    _UpperCAmelCase :int
    _UpperCAmelCase :float = 0.0
    _UpperCAmelCase :int = 1
    _UpperCAmelCase :bool = True
    _UpperCAmelCase :jnp.dtype = jnp.floataa
    def __UpperCamelCase( self ):
        """Build the ResNet submodules (flax setup)."""
        UpperCamelCase : int = []
        for i in range(self.num_layers ):
            UpperCamelCase : int = self.in_channels if i == 0 else self.out_channels
            UpperCamelCase : List[Any] = FlaxResnetBlockaD(
                in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(lowerCAmelCase_ )
        UpperCamelCase : List[str] = resnets
        if self.add_downsample:
            UpperCamelCase : Optional[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , A_ , A_ , A_=True ):
        """Apply each ResNet block, collecting intermediate hidden states."""
        UpperCamelCase : Optional[int] = ()
        for resnet in self.resnets:
            UpperCamelCase : Optional[int] = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
            output_states += (hidden_states,)
        if self.add_downsample:
            UpperCamelCase : Optional[Any] = self.downsamplers_a(lowerCAmelCase_ )
            output_states += (hidden_states,)
        return hidden_states, output_states
class A__ ( nn.Module ):
    # Flax up-block: ResNet blocks with cross-attention that consume skip
    # connections, plus an optional 2x upsampler.
    # NOTE(review): field names collapsed to `_UpperCAmelCase`; bodies read
    # self.in_channels / out_channels / prev_output_channel / dropout /
    # num_layers / num_attention_heads / add_upsample / ... — TODO restore.
    # `lowerCAmelCase_` and several locals are likewise unresolved.
    _UpperCAmelCase :int
    _UpperCAmelCase :int
    _UpperCAmelCase :int
    _UpperCAmelCase :float = 0.0
    _UpperCAmelCase :int = 1
    _UpperCAmelCase :int = 1
    _UpperCAmelCase :bool = True
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :jnp.dtype = jnp.floataa
    def __UpperCamelCase( self ):
        """Build resnet/attention pairs sized for concatenated skip inputs."""
        UpperCamelCase : Any = []
        UpperCamelCase : Any = []
        for i in range(self.num_layers ):
            UpperCamelCase : Optional[int] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            UpperCamelCase : List[str] = self.prev_output_channel if i == 0 else self.out_channels
            UpperCamelCase : int = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(lowerCAmelCase_ )
            UpperCamelCase : List[str] = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(lowerCAmelCase_ )
        UpperCamelCase : Optional[Any] = resnets
        UpperCamelCase : int = attentions
        if self.add_upsample:
            UpperCamelCase : Union[str, Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , A_ , A_ , A_ , A_ , A_=True ):
        """Pop skip states, concatenate on channels, run resnet+attention, upsample."""
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            UpperCamelCase : int = res_hidden_states_tuple[-1]
            UpperCamelCase : str = res_hidden_states_tuple[:-1]
            UpperCamelCase : Any = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            UpperCamelCase : int = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
            UpperCamelCase : Optional[Any] = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
        if self.add_upsample:
            UpperCamelCase : str = self.upsamplers_a(lowerCAmelCase_ )
        return hidden_states
class A__ ( nn.Module ):
    # Flax up-block of plain ResNet blocks consuming skip connections, with an
    # optional 2x upsampler.
    # NOTE(review): field names collapsed to `_UpperCAmelCase`; `lowerCAmelCase_`
    # and several locals in the bodies are unresolved — TODO restore.
    _UpperCAmelCase :int
    _UpperCAmelCase :int
    _UpperCAmelCase :int
    _UpperCAmelCase :float = 0.0
    _UpperCAmelCase :int = 1
    _UpperCAmelCase :bool = True
    _UpperCAmelCase :jnp.dtype = jnp.floataa
    def __UpperCamelCase( self ):
        """Build the ResNet submodules sized for concatenated skip inputs."""
        UpperCamelCase : Optional[int] = []
        for i in range(self.num_layers ):
            UpperCamelCase : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            UpperCamelCase : str = self.prev_output_channel if i == 0 else self.out_channels
            UpperCamelCase : Dict = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(lowerCAmelCase_ )
        UpperCamelCase : Tuple = resnets
        if self.add_upsample:
            UpperCamelCase : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , A_ , A_ , A_ , A_=True ):
        """Pop skip states, concatenate on channels, run each ResNet, upsample."""
        for resnet in self.resnets:
            # pop res hidden states
            UpperCamelCase : Dict = res_hidden_states_tuple[-1]
            UpperCamelCase : Optional[int] = res_hidden_states_tuple[:-1]
            UpperCamelCase : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            UpperCamelCase : Optional[int] = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
        if self.add_upsample:
            UpperCamelCase : Dict = self.upsamplers_a(lowerCAmelCase_ )
        return hidden_states
class A__ ( nn.Module ):
    # Flax mid-block: a leading ResNet block followed by num_layers
    # attention/ResNet pairs.
    # NOTE(review): field names collapsed to `_UpperCAmelCase`; `attentions`,
    # `resnets` and `lowerCAmelCase_` in the bodies are unresolved — TODO restore.
    _UpperCAmelCase :int
    _UpperCAmelCase :float = 0.0
    _UpperCAmelCase :int = 1
    _UpperCAmelCase :int = 1
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :jnp.dtype = jnp.floataa
    def __UpperCamelCase( self ):
        """Build one leading ResNet plus num_layers attention/ResNet pairs."""
        UpperCamelCase : str = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        UpperCamelCase : Optional[Any] = []
        for _ in range(self.num_layers ):
            UpperCamelCase : List[str] = FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(lowerCAmelCase_ )
            UpperCamelCase : List[str] = FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(lowerCAmelCase_ )
        UpperCamelCase : Tuple = resnets
        UpperCamelCase : List[Any] = attentions
    def __call__( self , A_ , A_ , A_ , A_=True ):
        """Run the first ResNet, then each attention/ResNet pair in order."""
        UpperCamelCase : List[Any] = self.resnets[0](lowerCAmelCase_ , lowerCAmelCase_ )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            UpperCamelCase : int = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
            UpperCamelCase : Optional[int] = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ )
        return hidden_states
| 716
|
from __future__ import annotations
def A_ ( _lowerCAmelCase ) -> list[int]:
    """Return all primes strictly below *limit* using an odd-only sieve.

    Assumes limit >= 3 — TODO confirm callers never pass smaller values.

    NOTE(review): restored from a broken renaming where the sieve list and the
    appended prime were bound to throwaway names.
    """
    is_prime = [True] * _lowerCAmelCase
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Only odd candidates are sieved; evens > 2 are never inspected below.
    for i in range(3 , int(_lowerCAmelCase**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < _lowerCAmelCase:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , _lowerCAmelCase , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def A_ ( _lowerCAmelCase = 100_0000 ) -> int:
    """Return the largest prime below *ceiling* expressible as the sum of the
    most consecutive primes (Project Euler 50).

    NOTE(review): restored from a broken renaming; the prime sieve is inlined
    as a private helper because the sibling sieve was renamed out of reach.
    """
    def _prime_sieve(limit: int) -> list[int]:
        # Classic Eratosthenes sieve; returns all primes strictly below limit.
        is_prime = [True] * limit
        is_prime[0] = is_prime[1] = False
        for i in range(2, int(limit**0.5) + 1):
            if is_prime[i]:
                for j in range(i * i, limit, i):
                    is_prime[j] = False
        return [i for i in range(2, limit) if is_prime[i]]

    primes = _prime_sieve(_lowerCAmelCase )
    prime_set = set(primes )  # O(1) membership instead of scanning the list
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        # Start at i + length: shorter runs cannot beat the best found so far.
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= _lowerCAmelCase:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    # NOTE(review): `solution` is unresolved — the solver above was renamed to `A_`.
    print(f"""{solution() = }""")
| 38
| 0
|
import math
def A_ ( _lowerCAmelCase ) -> list:
    """Return all primes strictly below *n* using an odd-only sieve.

    Assumes n >= 3 — TODO confirm callers never pass smaller values.

    NOTE(review): restored from a broken renaming where the sieve list and the
    parameter name diverged (`n` was undefined in the body).
    """
    is_prime = [True] * _lowerCAmelCase
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Only odd candidates are sieved; evens > 2 are never inspected below.
    for i in range(3 , int(_lowerCAmelCase**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < _lowerCAmelCase:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , _lowerCAmelCase , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def A_ ( _lowerCAmelCase = 9999_6666_3333 ) -> int:
    """Sum certain numbers below *limit* built from consecutive prime pairs —
    presumably a Project Euler solver; TODO confirm the exact statement.

    NOTE(review): every local was collapsed onto a throwaway name; the body
    reads `primes`, `prime_index`, `matches_sum`, `last_prime`, `next_prime`,
    `lower_bound`, `upper_bound`, `current`, `limit` and calls `prime_sieve`
    (renamed above to `A_`) — all unresolved. TODO restore before running.
    """
    UpperCamelCase : Dict = math.floor(math.sqrt(_lowerCAmelCase ) ) + 100
    UpperCamelCase : Union[str, Any] = prime_sieve(_lowerCAmelCase )
    UpperCamelCase : Tuple = 0
    UpperCamelCase : Dict = 0
    UpperCamelCase : str = primes[prime_index]
    while (last_prime**2) <= limit:
        UpperCamelCase : Dict = primes[prime_index + 1]
        UpperCamelCase : Optional[Any] = last_prime**2
        UpperCamelCase : Any = next_prime**2
        # Get numbers divisible by lps(current)
        UpperCamelCase : List[str] = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        UpperCamelCase : List[Any] = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        UpperCamelCase : Optional[Any] = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        UpperCamelCase : Tuple = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
    # NOTE(review): `solution` is unresolved — the solver above was renamed to `A_`.
    print(solution())
| 717
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __snake_case ):
    """Dataset input stream that builds a dataset from a generator builder.

    NOTE(review): the base name `__snake_case` is unresolved (presumably
    AbstractDatasetInputStream — TODO confirm), `__init__` reuses the parameter
    name `A_` (invalid Python), and the locals `download_config` /
    `download_mode` / `verification_mode` / `base_path` / `dataset` below are
    unresolved — TODO restore the original names.
    """
    def __init__( self , A_ , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , A_ = None , **A_ , ):
        """Store the base-stream options and the Generator builder (see class NOTE)."""
        super().__init__(
            features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , num_proc=A_ , **A_ , )
        UpperCamelCase : Optional[int] = Generator(
            cache_dir=A_ , features=A_ , generator=A_ , gen_kwargs=A_ , **A_ , )
    def __UpperCamelCase( self ):
        """Materialise the dataset: streaming view, or download-and-prepare then load."""
        if self.streaming:
            UpperCamelCase : Optional[Any] = self.builder.as_streaming_dataset(split="train" )
        # Build regular (map-style) dataset
        else:
            UpperCamelCase : Union[str, Any] = None
            UpperCamelCase : Union[str, Any] = None
            UpperCamelCase : List[Any] = None
            UpperCamelCase : List[str] = None
            self.builder.download_and_prepare(
                download_config=A_ , download_mode=A_ , verification_mode=A_ , base_path=A_ , num_proc=self.num_proc , )
            UpperCamelCase : int = self.builder.as_dataset(
                split="train" , verification_mode=A_ , in_memory=self.keep_in_memory )
        return dataset
| 38
| 0
|
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
    """Convert an OpenAI GPT TF checkpoint into a PyTorch config + weights dump.

    NOTE(review): the signature reuses one parameter name three times (invalid
    Python), and `openai_config_file`, `pytorch_dump_folder_path`, `config` and
    `model` below are unresolved; `load_tf_weights_in_openai_gpt` is also passed
    the same argument three times — TODO restore
    (openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path).
    """
    if openai_config_file == "":
        UpperCamelCase : Any = OpenAIGPTConfig()
    else:
        UpperCamelCase : List[Any] = OpenAIGPTConfig.from_json_file(_lowerCAmelCase )
    UpperCamelCase : Optional[int] = OpenAIGPTModel(_lowerCAmelCase )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    # Save pytorch-model
    UpperCamelCase : Union[str, Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    UpperCamelCase : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , _lowerCAmelCase )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # CLI entry point: convert an OpenAI GPT TF checkpoint to a PyTorch dump.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--openai_checkpoint_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the TensorFlow checkpoint path.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--openai_config_file""",
        default="""""",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained OpenAI model. \n"""
            """This specifies the model architecture."""
        ),
    )
    args = parser.parse_args()
    # NOTE(review): the conversion entry point is expected to be defined earlier
    # in this file; confirm its name survived the renaming pass.
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 718
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A_ ( _lowerCAmelCase ):  # picklable for multiprocessing
    """Return x.sum(); defined at module level so multiprocessing can pickle it."""
    return _lowerCAmelCase.sum()
def A_ ( _lowerCAmelCase ):  # picklable for multiprocessing
    """Return i + 1; defined at module level so multiprocessing can pickle it."""
    return _lowerCAmelCase + 1
# Simple picklable record used by the asdict tests below.
# NOTE(review): both field names were collapsed onto `_UpperCAmelCase` by the
# renaming pass, so only one field survives; the tests construct this as
# A(x=..., y=...) — TODO restore the class name `A` and fields `x: int`, `y: str`.
@dataclass
class A__ :
    _UpperCAmelCase :int
    _UpperCAmelCase :str
class A__ ( __snake_case ):
    """Unit tests for the py_utils helpers (map_nested, zip_dict, temporary_assignment).

    NOTE(review): the base name `__snake_case` is unresolved (presumably
    unittest.TestCase via the TestCase import — TODO confirm), and every local
    below was collapsed onto the single name `UpperCamelCase`, so each expected
    value overwrites the previous one before use; the `A_` call arguments are
    likewise unresolved — TODO restore distinct locals.
    """
    def __UpperCamelCase( self ):
        """map_nested over scalars, lists, dicts and nested dicts, with and without num_proc."""
        UpperCamelCase : Optional[int] = {}
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : List[Any] = 1
        UpperCamelCase : Tuple = [1, 2]
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]}
        UpperCamelCase : Any = {"a": {"1": 1}, "b": 2}
        UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4}
        UpperCamelCase : Dict = {}
        UpperCamelCase : Any = []
        UpperCamelCase : Any = 2
        UpperCamelCase : Any = [2, 3]
        UpperCamelCase : Optional[Any] = {"a": 2, "b": 3}
        UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]}
        UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3}
        UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        UpperCamelCase : List[str] = 2
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        UpperCamelCase : int = {"a": 2, "b": 0, "c": 2}
        UpperCamelCase : Union[str, Any] = {
            "a": np.eye(2 ).astype(A_ ),
            "b": np.zeros(3 ).astype(A_ ),
            "c": np.ones(2 ).astype(A_ ),
        }
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(A_ ): # can't pickle a local lambda
            map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )
    def __UpperCamelCase( self ):
        """zip_dict zips values across dicts sharing the same keys."""
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : List[Any] = {"a": 3, "b": 4}
        UpperCamelCase : Tuple = {"a": 5, "b": 6}
        UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
    def __UpperCamelCase( self ):
        """temporary_assignment swaps an attribute inside the context and restores it on exit."""
        class A__ :
            # NOTE(review): was presumably `class Foo` with attribute `my_attr` — TODO restore.
            _UpperCAmelCase :str = 'bar'
        UpperCamelCase : List[Any] = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(A_ , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
# map_nested must spawn a multiprocessing pool only when the iterable is at
# least parallel_min_length long and num_proc allows it.
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
    """Check single-threaded vs pooled dispatch of map_nested.

    NOTE(review): the signature reuses one parameter name three times (invalid
    Python) and the body reads `expected_num_proc` / `x` — TODO restore
    (iterable_length, num_proc, expected_num_proc).
    """
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        UpperCamelCase : Union[str, Any] = {F"""{i}""": i for i in range(_lowerCAmelCase )}
        UpperCamelCase : List[str] = map_nested(lambda _lowerCAmelCase : x + 10 , _lowerCAmelCase , num_proc=_lowerCAmelCase , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A__ ( __snake_case ):
    """temp_seed reproducibility tests for TensorFlow, PyTorch and NumPy RNGs:
    the same seed must reproduce identical outputs, and an unseeded call must
    differ.

    NOTE(review): the base name `__snake_case` is unresolved (presumably
    unittest.TestCase — TODO confirm); `model`, `outa` and the `A_` arguments
    in the bodies are likewise unresolved after renaming.
    """
    @require_tf
    def __UpperCamelCase( self ):
        """Same temp_seed => identical TF outputs; no seed => different output."""
        import tensorflow as tf
        from tensorflow.keras import layers
        UpperCamelCase : int = layers.Dense(2 )
        def gen_random_output():
            UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) )
            return model(A_ ).numpy()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : List[Any] = gen_random_output()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        UpperCamelCase : Optional[int] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    @require_torch
    def __UpperCamelCase( self ):
        """Same temp_seed => identical PyTorch outputs; no seed => different output."""
        import torch
        def gen_random_output():
            UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 )
            UpperCamelCase : Dict = torch.rand(1 , 3 )
            return model(A_ ).detach().numpy()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Optional[int] = gen_random_output()
        UpperCamelCase : List[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    def __UpperCamelCase( self ):
        """Same temp_seed => identical NumPy outputs; no seed => different output."""
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        UpperCamelCase : Optional[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
# NestedDataStructure must store the wrapped data unchanged.
@pytest.mark.parametrize("input_data" , [{}] )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Round-trip check of NestedDataStructure.data.

    NOTE(review): `output_data` / `input_data` are unresolved after renaming —
    TODO restore.
    """
    UpperCamelCase : Optional[Any] = NestedDataStructure(_lowerCAmelCase ).data
    assert output_data == input_data
# NestedDataStructure.flatten must flatten arbitrarily nested lists and dicts.
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
    """Flatten check.

    NOTE(review): duplicate parameter names are invalid Python and `output` /
    `expected_output` are unresolved — TODO restore (data, expected_output).
    """
    UpperCamelCase : Dict = NestedDataStructure(_lowerCAmelCase ).flatten()
    assert output == expected_output
def A_ ( ) -> List[Any]:
    """asdict converts dataclass instances, including ones nested in dicts/lists,
    and rejects top-level non-dataclass containers.

    NOTE(review): `A` is unresolved (the dataclass above was renamed to `A__`
    with collapsed fields) and the asdict/pytest.raises arguments were mangled —
    TODO restore.
    """
    UpperCamelCase : str = A(x=1 , y="foobar" )
    UpperCamelCase : Tuple = {"x": 1, "y": "foobar"}
    assert asdict(_lowerCAmelCase ) == expected_output
    UpperCamelCase : List[str] = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    UpperCamelCase : Tuple = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(_lowerCAmelCase ) == expected_output
    with pytest.raises(_lowerCAmelCase ):
        asdict([1, A(x=10 , y="foo" )] )
def A_ ( _lowerCAmelCase ):
    """Split *text* on whitespace; module-level so multiprocess can pickle it."""
    return _lowerCAmelCase.split()
def A_ ( _lowerCAmelCase ):
    """Yield (timestamp, content) twice, two seconds apart — used to verify that
    iflatmap_unordered delivers items as soon as they are produced."""
    yield (time.time(), _lowerCAmelCase)
    time.sleep(2 )
    yield (time.time(), _lowerCAmelCase)
def A_ ( ) -> str:
    """Exercise iflatmap_unordered with the stdlib Pool and the multiprocess Pool,
    and check items arrive promptly after being yielded.

    NOTE(review): `_split_text` and `_aseconds_generator_of_aitems_with_timing`
    are unresolved (the helpers above were renamed to `A_`), and `pool` is
    created but a throwaway name is passed to iflatmap_unordered instead —
    TODO restore before running.
    """
    with Pool(2 ) as pool:
        UpperCamelCase : List[str] = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(_lowerCAmelCase ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        UpperCamelCase : Dict = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(_lowerCAmelCase ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        UpperCamelCase : Any = []
        for yield_time, content in iflatmap_unordered(
            _lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(_lowerCAmelCase )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(_lowerCAmelCase ) == 4
| 38
| 0
|
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class A__ ( unittest.TestCase ):
    """Minimal tester helper: stores the parent test case and supplies the
    (empty) feature-extractor constructor kwargs.

    NOTE(review): restored `self.parent` — the original bound the argument to a
    throwaway name. The config method's mangled double-underscore name is kept
    so the external interface is unchanged; confirm its original name
    (presumably prepare_feat_extract_dict) against the caller.
    """
    def __init__( self , A_ ):
        self.parent = A_
    def __UpperCamelCase( self ):
        """Return the feature-extractor constructor kwargs (none needed)."""
        return {}
def A_ ( ):
    """Return two sample HTML documents used by the feature-extraction tests.

    NOTE(review): restored from a broken renaming where both strings were bound
    to the same throwaway name and the return read undefined names.
    """
    html_string_one = "<HTML>\n\n    <HEAD>\n    <TITLE>sample document</TITLE>\n    </HEAD>\n\n    <BODY BGCOLOR=\"FFFFFF\">\n    <HR>\n    <a href=\"http://google.com\">Goog</a>\n    <H1>This is one header</H1>\n    <H2>This is a another Header</H2>\n    <P>Travel from\n    <P>\n    <B>SFO to JFK</B>\n    <BR>\n    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n    <HR>\n    <div style=\"color:#0000FF\">\n    <h3>Traveler <b> name </b> is\n    <p> John Doe </p>\n    </div>"
    html_string_two = "\n    <!DOCTYPE html>\n    <html>\n    <body>\n\n    <h1>My First Heading</h1>\n    <p>My first paragraph.</p>\n\n    </body>\n    </html>\n    "
    return [html_string_one, html_string_two]
@require_bsa
class A__ ( __lowerCamelCase , unittest.TestCase ):
    """Tests for MarkupLMFeatureExtractor node/xpath extraction on single and
    batched HTML inputs.

    NOTE(review): the mixin base `__lowerCamelCase` is unresolved (presumably
    FeatureExtractionSavingTestMixin — TODO confirm), and
    `MarkupLMFeatureExtractionTester`, `get_html_strings`, `feature_extractor`,
    `encoding`, `expected_nodes`, `expected_xpaths` and `SCREAMING_SNAKE_CASE_`
    are unresolved after renaming — TODO restore.
    """
    _UpperCAmelCase :int = MarkupLMFeatureExtractor if is_bsa_available() else None
    def __UpperCamelCase( self ):
        """Attach the tester helper."""
        UpperCamelCase : List[Any] = MarkupLMFeatureExtractionTester(self )
    @property
    def __UpperCamelCase( self ):
        """Constructor kwargs for the feature extractor under test."""
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def __UpperCamelCase( self ):
        """Single and batched HTML inputs must yield the expected nodes and xpaths."""
        UpperCamelCase : Tuple = self.feature_extraction_class()
        # Test not batched input
        UpperCamelCase : List[Any] = get_html_strings()[0]
        UpperCamelCase : Dict = feature_extractor(SCREAMING_SNAKE_CASE_ )
        # fmt: off
        UpperCamelCase : int = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        UpperCamelCase : List[str] = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(encoding.xpaths , SCREAMING_SNAKE_CASE_ )
        # Test batched
        UpperCamelCase : str = get_html_strings()
        UpperCamelCase : Dict = feature_extractor(SCREAMING_SNAKE_CASE_ )
        # fmt: off
        UpperCamelCase : List[str] = expected_nodes + [["My First Heading", "My first paragraph."]]
        UpperCamelCase : Union[str, Any] = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(encoding.xpaths , SCREAMING_SNAKE_CASE_ )
| 719
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['note_seq']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["note_seq"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
| 38
| 0
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the small SentencePiece fixture model used by the tokenizer tests below.
__lowerCamelCase : str = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
    import sentencepiece as sp
# NOTE(review): these two constants look like the FR/ES language-code token ids
# (FR_CODE = 5, ES_CODE = 10 upstream) read by the multilingual tests further
# down; the names were lost in mangling — confirm against the original file.
__lowerCamelCase : int = 5
__lowerCamelCase : Dict = 10
@require_sentencepiece
@require_tokenizers
class A__ ( _snake_case , unittest.TestCase ):
    """Unit tests for `SpeechaTextTokenizer` backed by the SentencePiece fixture.

    NOTE(review): local variables throughout this class are bound to mangled
    placeholder names (`UpperCamelCase : ...`) while subsequent statements read
    the intended names (`spm_model`, `vocab`, `tokenizer`, ...), so the bodies
    cannot run as written; they are kept byte-identical and only documented.
    """
    _UpperCAmelCase :str = SpeechaTextTokenizer
    _UpperCAmelCase :int = False
    _UpperCAmelCase :List[str] = True
    def __UpperCamelCase( self ):
        '''Build a vocab + spm fixture tokenizer and save it into the tmp dir.'''
        super().setUp()
        UpperCamelCase : Optional[Any] = sp.SentencePieceProcessor()
        spm_model.Load(snake_case_ )
        # Special tokens first, then the raw SentencePiece pieces in id order.
        UpperCamelCase : Any = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(snake_case_ ) )]
        UpperCamelCase : Union[str, Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
        UpperCamelCase : Dict = Path(self.tmpdirname )
        save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        UpperCamelCase : List[Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def __UpperCamelCase( self ):
        '''`<pad>` must round-trip to id 1 and back.'''
        UpperCamelCase : Tuple = "<pad>"
        UpperCamelCase : Optional[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
    def __UpperCamelCase( self ):
        '''First/last vocab entries and total vocab size of the fixture.'''
        UpperCamelCase : int = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(snake_case_ ) , 1001 )
    def __UpperCamelCase( self ):
        '''Vocab size = 1000 spm pieces + 1 extra special token.'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
    def __UpperCamelCase( self ):
        '''Tokenize / ids / back-to-tokens round trips, incl. unknown chars.'''
        UpperCamelCase : List[str] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
        UpperCamelCase : Optional[Any] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(snake_case_ ) , [289, 50, 14, 174, 386] , )
        UpperCamelCase : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            snake_case_ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
        UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(snake_case_ )
        self.assertListEqual(snake_case_ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
        UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(snake_case_ )
        # Out-of-vocabulary pieces ("9", "é") come back as "<unk>".
        self.assertListEqual(
            snake_case_ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
    @slow
    def __UpperCamelCase( self ):
        '''Pinned integration encoding for the MuST-C en-de checkpoint.'''
        UpperCamelCase : Tuple = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A__ ( unittest.TestCase ):
    """Integration tests against the real multilingual MuST-C checkpoint.

    NOTE(review): as elsewhere in this dump, locals are bound to mangled
    placeholder names while later lines read the intended names
    (`self.tokenizer`, `generated_ids`, `encoded`, ...); bodies are kept
    byte-identical and only documented.
    """
    _UpperCAmelCase :Tuple = """valhalla/s2t_mustc_multilinguial_medium"""
    _UpperCAmelCase :Union[str, Any] = """C'est trop cool"""
    _UpperCAmelCase :Any = """Esto es genial"""
    @classmethod
    def __UpperCamelCase( cls ):
        '''Download the shared tokenizer once for the whole test class
        (presumably `setUpClass` in the un-mangled source — TODO confirm).'''
        UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
        return cls
    def __UpperCamelCase( self ):
        '''Language-code token ids are pinned for pt/ru/it/de.'''
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
        self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
        self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
    def __UpperCamelCase( self ):
        '''Multilingual checkpoint uses a 10k vocab.'''
        self.assertEqual(self.tokenizer.vocab_size , 1_0000 )
    def __UpperCamelCase( self ):
        '''Decoding with skip_special_tokens drops the leading language code.'''
        self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
        UpperCamelCase : Optional[int] = [ES_CODE, 4, 1601, 47, 7647, 2]
        UpperCamelCase : Tuple = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
        UpperCamelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )
        self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
    def __UpperCamelCase( self ):
        '''Encoding prepends the active language code and appends EOS.'''
        UpperCamelCase : str = "fr"
        UpperCamelCase : int = self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0] , snake_case_ )
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
    def __UpperCamelCase( self ):
        '''Switching `tgt_lang` updates the prefix tokens accordingly.'''
        UpperCamelCase : Tuple = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
        UpperCamelCase : List[Any] = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 720
|
import math
import tensorflow as tf
from packaging import version
def A_ ( _lowerCAmelCase ) -> Any:
    """Exact Gaussian Error Linear Unit: ``x * Φ(x)`` computed via `tf.math.erf`.

    Fix: the mangled original bound the converted tensor and the CDF to a lost
    placeholder name while the following lines read `x`/`cdf` (NameError).

    Args:
        _lowerCAmelCase: tensor-like input.
    Returns:
        A tensor of the same shape with GELU applied elementwise.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    # Φ(x) = 0.5 * (1 + erf(x / sqrt(2))); the cast keeps sqrt(2) in x's dtype.
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def A_ ( _lowerCAmelCase ) -> Dict:
    """tanh approximation of GELU (the GPT-2 "gelu_new" variant).

    Fix: intermediates were bound to a lost placeholder name while the body
    read `x`, `pi`, `coeff` and `cdf` (NameError in the mangled original).
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044_715 , x.dtype )
    # 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Mish activation: ``x * tanh(softplus(x))``.

    Fix: the converted tensor was bound to a lost placeholder name while the
    return expression read `x` (NameError in the mangled original).
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    return x * tf.tanh(tf.math.softplus(x ) )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Fast tanh-based GELU approximation:
    ``0.5 * x * (1 + tanh(x * sqrt(2/pi) * (1 + 0.044715 * x^2)))``.

    Fix: both coefficients were bound to the *same* lost placeholder name and
    the return expression read an undefined `coeffa`; they are now distinct,
    matching the visible literals 0.044715 and 0.7978845608 (= sqrt(2/pi)).
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff_inner = tf.cast(0.044_715 , x.dtype )
    coeff_outer = tf.cast(0.7_978_845_608 , x.dtype )  # sqrt(2 / pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_outer * (1.0 + coeff_inner * x * x) ))
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Quick GELU: ``x * sigmoid(1.702 * x)``, a cheap sigmoid approximation.

    Fix: the converted tensor and the coefficient were bound to a lost
    placeholder name while the return read `x`/`coeff` (NameError).
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    # Clipped GELU ("gelu_10"): exact GELU with outputs clamped to [-10, 10].
    # NOTE(review): `_gelu` is not defined under that name in this chunk — it is
    # presumably the exact-GELU helper defined above under a mangled name;
    # confirm the binding in the full, un-mangled file before relying on this.
    return tf.clip_by_value(_gelu(_lowerCAmelCase ) , -10 , 10 )
def A_ ( _lowerCAmelCase , axis=-1 ) -> Any:
    """Gated Linear Unit: split the input into two halves along `axis` and
    gate the first half with the sigmoid of the second (Dauphin et al., 2016).

    Fixes in the mangled original: both parameters shared one name (a
    SyntaxError), the split halves were bound to a lost placeholder while the
    return read `a`, and the gate applied sigmoid to the whole input instead
    of the second half.
    """
    a , b = tf.split(_lowerCAmelCase , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
# Prefer TensorFlow's native GELU kernel when available (TF >= 2.4), otherwise
# fall back to the hand-written implementations above.
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
    def A_ ( _lowerCAmelCase ) -> Any:
        # NOTE(review): `approximate=` is passed the *input tensor* here, which
        # cannot be right — upstream passes `approximate=True` to select the
        # tanh approximation. Flagging rather than changing, since the wrapper
        # name this is assigned to below is also mangled.
        return tf.keras.activations.gelu(_lowerCAmelCase , approximate=_lowerCAmelCase )
    # NOTE(review): the targets below are mangled; upstream binds `gelu` to the
    # keras kernel and `gelu_new` to the approximate wrapper. `approximate_gelu_wrap`
    # is not defined under that name in this chunk — confirm in the full file.
    __lowerCamelCase : Optional[int] = tf.keras.activations.gelu
    __lowerCamelCase : int = approximate_gelu_wrap
else:
    # NOTE(review): `_gelu` / `_gelu_new` are not defined under those names in
    # this chunk (the defs above are mangled to `A_`) — confirm in the full file.
    __lowerCamelCase : List[Any] = _gelu
    __lowerCamelCase : Optional[Any] = _gelu_new
# String name -> TF activation callable (the ACT2FN registry consumed by the
# lookup helper below). NOTE(review): the values reference module-level names
# (`gelu`, `gelu_aa`, `gelu_fast`, ...) whose definitions above were assigned
# to mangled names, so this table cannot resolve as written in this chunk —
# confirm the bindings in the full, un-mangled file.
__lowerCamelCase : Any = {
    """gelu""": gelu,
    """gelu_10""": gelu_aa,
    """gelu_fast""": gelu_fast,
    """gelu_new""": gelu_new,
    """glu""": glu,
    """mish""": mish,
    """quick_gelu""": quick_gelu,
    """relu""": tf.keras.activations.relu,
    """sigmoid""": tf.keras.activations.sigmoid,
    """silu""": tf.keras.activations.swish,
    """swish""": tf.keras.activations.swish,
    """tanh""": tf.keras.activations.tanh,
}
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Look up an activation function by its string name.

    Fix: the mangled original tested/indexed with an undefined local
    `activation_string` instead of its parameter.

    Raises:
        KeyError: if the name is absent from the ACT2FN-style registry.
    """
    # NOTE(review): `ACTaFN` is the module-level registry; in this chunk the
    # table above is assigned to a mangled name — confirm the actual binding.
    if _lowerCAmelCase in ACTaFN:
        return ACTaFN[_lowerCAmelCase]
    raise KeyError(F"""function {_lowerCAmelCase} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
| 38
| 0
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class A__ ( lowercase__ ):
    '''Slow end-to-end test: fine-tune a tiny BERT2BERT encoder-decoder with
    `SeqaSeqTrainer` on 1% of CNN/DailyMail.

    NOTE(review): locals throughout are bound to mangled placeholder names
    while later statements read the intended names (`bertabert`, `tokenizer`,
    `inputs`, `outputs`, `batch`, ...); the body is kept byte-identical and
    only documented.
    '''
    @slow
    @require_torch
    def __UpperCamelCase( self ):
        '''Build datasets, map them to encoder/decoder features, and train.'''
        UpperCamelCase : Optional[int] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        UpperCamelCase : str = BertTokenizer.from_pretrained("bert-base-uncased" )
        UpperCamelCase : List[str] = bertabert.config.encoder.vocab_size
        UpperCamelCase : Tuple = tokenizer.sep_token_id
        UpperCamelCase : Tuple = tokenizer.cls_token_id
        UpperCamelCase : Optional[int] = 128
        UpperCamelCase : List[str] = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        UpperCamelCase : List[Any] = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        # Keep the run fast: 32 train / 16 eval examples.
        UpperCamelCase : List[str] = train_dataset.select(range(32 ) )
        UpperCamelCase : Optional[Any] = val_dataset.select(range(16 ) )
        UpperCamelCase : Optional[int] = 4
        def _map_to_encoder_decoder_inputs(A_ ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            UpperCamelCase : Tuple = tokenizer(batch["article"] , padding="max_length" , truncation=UpperCAmelCase__ , max_length=512 )
            UpperCamelCase : Union[str, Any] = tokenizer(batch["highlights"] , padding="max_length" , truncation=UpperCAmelCase__ , max_length=128 )
            UpperCamelCase : Optional[int] = inputs.input_ids
            UpperCamelCase : List[Any] = inputs.attention_mask
            UpperCamelCase : List[Any] = outputs.input_ids
            UpperCamelCase : Optional[int] = outputs.input_ids.copy()
            # Pad positions in the labels are masked to -100 so they are
            # ignored by the cross-entropy loss.
            UpperCamelCase : List[Any] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            UpperCamelCase : Dict = outputs.attention_mask
            assert all(len(UpperCAmelCase__ ) == 512 for x in inputs.input_ids )
            assert all(len(UpperCAmelCase__ ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(A_ ):
            # Exact-match accuracy over decoded prediction/label strings.
            UpperCamelCase : Optional[Any] = pred.label_ids
            UpperCamelCase : Union[str, Any] = pred.predictions
            # all unnecessary tokens are removed
            UpperCamelCase : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
            UpperCamelCase : int = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
            UpperCamelCase : str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase__ ) )] ) / len(UpperCAmelCase__ )
            return {"accuracy": accuracy}
        # map train dataset
        UpperCamelCase : Union[str, Any] = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        UpperCamelCase : List[str] = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        UpperCamelCase : str = self.get_auto_remove_tmp_dir()
        UpperCamelCase : int = SeqaSeqTrainingArguments(
            output_dir=UpperCAmelCase__ , per_device_train_batch_size=UpperCAmelCase__ , per_device_eval_batch_size=UpperCAmelCase__ , predict_with_generate=UpperCAmelCase__ , evaluation_strategy="steps" , do_train=UpperCAmelCase__ , do_eval=UpperCAmelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        UpperCamelCase : Union[str, Any] = SeqaSeqTrainer(
            model=UpperCAmelCase__ , args=UpperCAmelCase__ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , )
        # start training
        trainer.train()
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Force deterministic torch/diffusers ops so the pixel-slice assertions below
# are reproducible across runs.
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
    """Fast (CPU, dummy-weight) tests for `KandinskyVaaPipeline`.

    NOTE(review): locals are bound to mangled placeholder names while later
    lines read the intended names (`pipe`, `output`, `image`, ...); bodies are
    kept byte-identical and only documented.
    """
    _UpperCAmelCase :str = KandinskyVaaPipeline
    # Required / batch call arguments exercised by the common pipeline tests.
    _UpperCAmelCase :str = [
        'image_embeds',
        'negative_image_embeds',
    ]
    _UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds']
    _UpperCAmelCase :List[str] = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    _UpperCAmelCase :List[str] = False
    @property
    def __UpperCamelCase( self ):
        '''Text-embedder hidden size for the dummy components.'''
        return 32
    @property
    def __UpperCamelCase( self ):
        '''Time-embedding input dim for the dummy UNet.'''
        return 32
    @property
    def __UpperCamelCase( self ):
        '''Block output channels mirror the time input dim.'''
        return self.time_input_dim
    @property
    def __UpperCamelCase( self ):
        '''Cross-attention dim is 4x the time input dim.'''
        return self.time_input_dim * 4
    @property
    def __UpperCamelCase( self ):
        '''Scheduler timestep count used by the dummy setup.'''
        return 100
    @property
    def __UpperCamelCase( self ):
        '''Tiny `UNet2DConditionModel` with image-projection conditioning.'''
        torch.manual_seed(0 )
        UpperCamelCase : List[str] = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        UpperCamelCase : Dict = UNetaDConditionModel(**A_ )
        return model
    @property
    def __UpperCamelCase( self ):
        '''Constructor kwargs for the tiny VQ decoder ("movq").'''
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def __UpperCamelCase( self ):
        '''Seeded tiny `VQModel` instance.'''
        torch.manual_seed(0 )
        UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
        return model
    def __UpperCamelCase( self ):
        '''Assemble {unet, scheduler, movq} for the pipeline under test.'''
        UpperCamelCase : Tuple = self.dummy_unet
        UpperCamelCase : Optional[Any] = self.dummy_movq
        UpperCamelCase : Dict = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
        UpperCamelCase : Tuple = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def __UpperCamelCase( self , A_ , A_=0 ):
        '''Deterministic call kwargs (seeded embeds + generator) for a device.'''
        UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
        UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            A_ )
        # mps does not support device-bound generators.
        if str(A_ ).startswith("mps" ):
            UpperCamelCase : Optional[Any] = torch.manual_seed(A_ )
        else:
            UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
        UpperCamelCase : Optional[int] = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def __UpperCamelCase( self ):
        '''Two-step CPU run: output shape + pinned corner pixel slice.'''
        UpperCamelCase : Optional[Any] = "cpu"
        UpperCamelCase : List[str] = self.get_dummy_components()
        UpperCamelCase : Tuple = self.pipeline_class(**A_ )
        UpperCamelCase : List[str] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) )
        UpperCamelCase : Optional[int] = output.images
        UpperCamelCase : int = pipe(
            **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
        UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
        UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase : int = np.array(
            [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration test: full prior + decoder text2img run compared
    against a reference numpy image.

    NOTE(review): locals are bound to mangled placeholder names while later
    lines read the intended names (`pipe_prior`, `pipeline`, `output`, ...);
    bodies are kept byte-identical and only documented.
    """
    def __UpperCamelCase( self ):
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCamelCase( self ):
        '''Generate a 512x512 "red cat" image and compare to the reference.'''
        UpperCamelCase : Dict = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(A_ )
        UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
        UpperCamelCase : Tuple = pipeline.to(A_ )
        pipeline.set_progress_bar_config(disable=A_ )
        UpperCamelCase : str = "red cat, 4k photo"
        UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 )
        UpperCamelCase , UpperCamelCase : Tuple = pipe_prior(
            A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 )
        UpperCamelCase : Tuple = pipeline(
            image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , )
        UpperCamelCase : Union[str, Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(A_ , A_ )
| 38
| 0
|
import re
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True if the given string is a valid Sri Lankan mobile number.

    Accepted shape: prefix ``0`` / ``94`` / ``+94`` / ``0094``, operator digit
    ``7x`` with x in {0,1,2,4,5,6,7,8}, an optional ``-`` or space separator,
    then exactly 7 digits.

    Fix: the mangled original compiled the pattern into a lost name and then
    called ``re.search(phone, phone)`` — searching the phone number *as a
    pattern against itself*, which is truthy for essentially any input. The
    compiled pattern is now actually used.
    """
    phone_re = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(phone_re.search(_lowerCAmelCase ) )
if __name__ == "__main__":
    # Fix: the mangled original bound the sample number to a lost placeholder
    # name and then called the undefined `is_sri_lankan_phone_number`; route
    # the demo through the validator `A_` defined directly above.
    phone = "0094702343221"
    print(A_(phone))
| 700
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A_ ( ) -> Dict:
UpperCamelCase : Tuple = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=_lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=_lowerCAmelCase , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=_lowerCAmelCase )
return parser.parse_args()
def A_ ( ) -> Optional[int]:
    '''Import the user training script as a module, patch `sys.argv`, and
    spawn it across TPU cores via `xmp.spawn`.'''
    # NOTE(review): locals below are bound to mangled placeholder names while
    # later lines read `args`, `script_fpath`, `mod_name` and `mod`, and
    # `parse_args` is defined above under a mangled name — this body cannot
    # run as written; kept byte-identical and only documented.
    UpperCamelCase : Tuple = parse_args()
    # Import training_script as a module.
    UpperCamelCase : Union[str, Any] = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    UpperCamelCase : List[Any] = script_fpath.stem
    UpperCamelCase : Optional[Any] = importlib.import_module(_lowerCAmelCase )
    # Patch sys.argv so the spawned script sees its own args plus the core count.
    UpperCamelCase : List[Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this chunk — the entry point above
    # is mangled to `A_`; confirm the binding in the full file.
    main()
| 38
| 0
|
from __future__ import annotations
def A_ ( _lowerCAmelCase ):
    """Return the prime factorization of a positive integer, in ascending
    order with multiplicity (trial division up to sqrt(n)).

    Fixes in the mangled original: the trial divisor and the result list were
    bound to lost placeholder names while the loop read undefined `i` / `n` /
    `factors`, and the appends pushed another undefined placeholder instead of
    the divisor / remaining cofactor.

    >>> A_(12)
    [2, 2, 3]
    >>> A_(1)
    []
    """
    n = _lowerCAmelCase
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    # Whatever remains above sqrt(original n) is itself prime.
    if n > 1:
        factors.append(n)
    return factors
# Run the doctest examples embedded in this module when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding: map submodule name -> public symbols, extended below
# only for backends that are actually importable.
#
# Fixes vs. the mangled original: the structure dict and each conditional
# symbol list were bound to a lost placeholder name (so `_import_structure`
# referenced at the bottom was undefined), and the `_LazyModule` instance was
# assigned to a placeholder instead of being installed into `sys.modules`.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are lazy.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so attribute access triggers
    # on-demand submodule imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 38
| 0
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
# Module-level logger shared by the two Flax->PyTorch loaders below.
# NOTE(review): the target name is mangled — the loaders read `logger`.
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def A_ ( pt_model , model_file ) -> Tuple:
    """Deserialize the Flax checkpoint at `model_file` and load it into
    `pt_model`.

    Fixes in the mangled original: both parameters shared the same name (a
    SyntaxError) and the deserialized state was bound to a lost placeholder.
    Parameter names are restored from the error message, which interpolates
    ``{model_file}``.

    Raises:
        OSError: if the file looks like an un-pulled git-lfs pointer file.
        EnvironmentError: if the bytes cannot be deserialized as Flax state.
    """
    try:
        with open(model_file , "rb" ) as flax_state_f:
            # NOTE(review): a `None` target makes `from_bytes` return the raw
            # state pytree — confirm against the flax.serialization docs.
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith("version" ):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned." )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def A_ ( pt_model , flax_state ) -> Union[str, Any]:
    """Copy weights from a Flax state pytree into a PyTorch model.

    Transposes conv kernels (HWIO -> OIHW) and dense kernels, renames
    ``kernel``/``scale`` leaves to ``weight``, rewrites ``_N`` name segments to
    ``.N`` (except under ``time_embedding``), and warns about keys that are
    unexpected by / missing from the PyTorch model.

    Fixes in the mangled original: both parameters shared one name (a
    SyntaxError) and every intermediate was bound to a lost placeholder while
    subsequent lines read the real names; the body is restored line-by-line
    from those reads.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    # NOTE(review): the original assigned "" to a mangled name here; upstream
    # sets `pt_model.base_model_prefix = ""` — restored on that assumption.
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state , sep="." )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split("." )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernel: Flax HWIO -> PyTorch OIHW
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            # dense kernel: transpose to PyTorch's (out, in) layout
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
        if "time_embedding" not in flax_key_tuple_array:
            # map Flax's "name_0" style sub-block names to PyTorch's "name.0"
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0" , ".0" )
                    .replace("_1" , ".1" )
                    .replace("_2" , ".2" )
                    .replace("_3" , ".3" )
                    .replace("_4" , ".4" )
                    .replace("_5" , ".5" )
                    .replace("_6" , ".6" )
                    .replace("_7" , ".7" )
                    .replace("_8" , ".8" )
                    .replace("_9" , ".9" )
                )
        flax_key = ".".join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)." )
    if len(missing_keys ) > 0:
        logger.warning(
            F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            " use it for predictions and inference." )
    return pt_model
| 702
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class A__ ( unittest.TestCase ):
    """Config holder for the Vivit image-processor tests: stores the dummy
    processing parameters and renders them as a kwargs dict.

    Fixes in the mangled original: every `__init__` parameter shared the same
    name (a SyntaxError) and every attribute was bound to a lost placeholder
    name instead of being set on `self`, so `prepare_image_processor_dict`
    read attributes that were never assigned. Parameter names/defaults are
    restored from the attribute reads and the visible default values; the
    mutable list defaults are replaced by the `None`-sentinel idiom (same
    effective defaults, backward compatible).
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=None , image_std=None , crop_size=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Fall back to the upstream dummy defaults when not supplied.
        self.size = size if size is not None else {"shortest_edge": 18}
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

    def __UpperCamelCase( self ):
        '''Return the kwargs dict used to construct a `VivitImageProcessor`.'''
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for VivitImageProcessor.

    NOTE(review): in the original every test method was named
    ``__UpperCamelCase`` so later defs shadowed earlier ones and unittest
    could not discover any of them; names are restored from what each
    body exercises. ``equal_resolution=A_`` referenced an undefined name
    and is restored to the boolean the helpers expect.
    """

    # Only available when the vision backend is installed.
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _expected_shape(self, batched):
        # Helper: expected pixel_values shape for a (possibly batched) video.
        batch = self.image_processor_tester.batch_size if batched else 1
        return (
            batch,
            self.image_processor_tester.num_frames,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(batched=False))

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(batched=True))

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(batched=False))

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(batched=True))

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(batched=False))

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_videos.shape, self._expected_shape(batched=True))
| 38
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module logger: the original bound this to a throwaway name, leaving the
# ``logger`` used throughout the config classes below undefined.
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class A__ ( PretrainedConfig ):
    """Configuration class for ESM models (``model_type="esm"``).

    Holds the BERT-style encoder hyper-parameters and, when
    ``is_folding_model`` is True, a nested ``EsmFoldConfig``.

    NOTE(review): the original block did not parse (every ``__init__``
    parameter was named ``A_``) and stored values in throwaway locals;
    names are restored from the attribute-read sites below.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            # Non-folding checkpoints carry no folding head configuration.
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested EsmFoldConfig."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Sub-configuration for the ESMFold structure-prediction head.

    NOTE(review): the original declared every field as ``_UpperCAmelCase``
    (all but the last were shadowed); names are restored from the reads in
    this module (``self.trunk``, ``use_esm_attn_map`` via ``getattr``) and
    the class name from the references at the EsmConfig call sites.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept a TrunkConfig instance, a plain dict, or nothing at all.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a dict, expanding the nested trunk configuration."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Configuration of the ESMFold folding trunk.

    NOTE(review): field names restored from the reads in ``__post_init__``
    (``sequence_state_dim``, ``pairwise_head_width``, ...); the remaining
    names follow the upstream ESMFold trunk config — TODO confirm.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""")
        # BUGFIX: the original compared each state dim against itself
        # (``x % x`` is always 0, so the check never fired); the intent is
        # divisibility by the corresponding per-head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")
        if self.dropout >= 0.4:
            raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""")

    def to_dict(self):
        """Serialize to a dict, expanding the nested structure-module config."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Configuration of the ESMFold structure module.

    NOTE(review): the original declared every field as ``_UpperCAmelCase``;
    default values are taken verbatim from the source, field names follow
    the upstream ESMFold structure-module config — TODO confirm.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize all fields to a plain dict."""
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary as a tuple of strings.

    Renamed from the garbled ``A_``: EsmConfig calls
    ``get_default_vocab_list()`` when no vocab_list is supplied.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
| 703
|
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

logger = logging.get_logger(__name__)

# Constant names restored: the tokenizer class below reads
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which the garbled original never
# bound (everything was assigned to a single throwaway name).
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class A__ ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) BlenderbotSmall tokenizer.

    NOTE(review): the original did not parse (duplicate ``A_`` parameters)
    and extended an undefined ``__snake_case`` base; the base and names are
    restored from the module imports and HF fast-tokenizer conventions.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two token-id sequences in BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return all-zero token type ids (this model does not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 38
| 0
|
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig

MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

# Module logger: restored so the ``logger.warning_once`` call in the config
# class below resolves (the original bound it to a throwaway name).
logger = logging.get_logger(__name__)
class A__ ( PretrainedConfig ):
    """Configuration class for MaskFormer (backbone + DETR-style decoder).

    NOTE(review): the original did not parse (duplicate ``A_`` parameters)
    and stored values in throwaway locals; names are restored from the
    attribute reads below (``self.backbones_supported`` etc.) and the
    upstream MaskFormer configuration.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size=256,
        mask_feature_size=256,
        no_object_weight=0.1,
        use_auxiliary_loss=False,
        backbone_config=None,
        decoder_config=None,
        init_std=0.02,
        init_xavier_std=1.0,
        dice_weight=1.0,
        cross_entropy_weight=1.0,
        mask_weight=20.0,
        output_auxiliary_logits=None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                F"""Supported model types: {",".join(self.backbones_supported )}""" )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    F"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    F""" {",".join(self.decoders_supported )}""" )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirror the decoder's transformer geometry at the top level.
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        """Alternate constructor from pre-built backbone/decoder configs."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 704
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import table. BUGFIX: the original assigned the optional-backend
# symbol lists to throwaway names, so they never entered the table, and
# ``_import_structure`` (referenced at the bottom) was never bound at all.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38
| 0
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """Builds tiny ALBERT configs/inputs for the Flax model tests.

    NOTE(review): the original defined every ``__init__`` parameter as
    ``A_`` (a SyntaxError) and stored values in throwaway locals; names
    are restored from the attribute reads below and the class name from
    the ``FlaxAlbertModelTester(self)`` call site.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class A__ ( FlaxModelTesterMixin , unittest.TestCase ):
    """Common-mixin tests for the Flax ALBERT model family.

    NOTE(review): the original named both methods ``__UpperCamelCase`` so
    the second shadowed the first; names restored from their bodies, and
    the attribute name from the ``self.all_model_classes`` read.
    """

    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Every head should load from the reference checkpoint and run.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class A__ ( unittest.TestCase ):
    """Integration test against the albert-base-v2 checkpoint.

    NOTE(review): local names restored — the original assigned every value
    to a throwaway ``UpperCamelCase`` local and then referenced undefined
    names (``A_``, ``output``); the method name is a conventional guess.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 705
|
import logging
import os
import threading
import time

# Optional-dependency fallbacks. BUGFIX: the original bound the fallback
# ``None`` to a throwaway name, so ``warnings`` / ``msvcrt`` / ``fcntl``
# were undefined on platforms where the import fails, breaking the
# platform dispatch at the bottom of this module.
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__lowerCamelCase : Union[str, Any] = """3.0.12"""
__lowerCamelCase : Any = None
def A_ ( ) -> List[Any]:
global _logger
UpperCamelCase : Any = _logger or logging.getLogger(__name__ )
return _logger
class Timeout(TimeoutError):
    """Raised when a file lock could not be acquired within the timeout.

    Renamed from the garbled ``A__``: BaseFileLock raises
    ``Timeout(self._lock_file)`` on expiry.
    """

    def __init__(self, lock_file):
        # Path of the lock file that could not be acquired.
        self.lock_file = lock_file
        return None

    def __str__(self):
        return f"The file lock '{self.lock_file}' could not be acquired."
class _Acquire_ReturnProxy:
    """Context-manager proxy returned by ``BaseFileLock.acquire()``.

    Lets ``with lock.acquire(...):`` release the lock on scope exit while
    plain ``lock.acquire()`` keeps it held. Renamed from ``A__`` to match
    the ``_Acquire_ReturnProxy(lock=self)`` call site.
    """

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock (platform backends subclass it).

    NOTE(review): restored from the garbled original, which did not parse
    (duplicate ``A_`` parameters in ``acquire``) and assigned every
    attribute to a throwaway local; attribute and method names are taken
    from the reads inside this class (``self._acquire``, ``self.is_locked``,
    ``self.hash_filename_if_too_long``, ...).
    """

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """Create the lock object for *lock_file*; -1 timeout waits forever."""
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function. This file lock is only NOT None, if the object
        # currently holds the lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased
        # and the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """Default timeout in seconds (-1 means wait forever)."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-specific acquire; implemented by subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-specific release; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True while this object holds the lock (fd is open)."""
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Try to acquire the lock, polling until *timeout* expires.

        Returns a proxy usable as a context manager; raises ``Timeout``
        when the lock cannot be obtained in time.
        """
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Decrement the nesting counter; release when it reaches 0 or *force*."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        # Always drop the OS lock when the object is garbage collected.
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        """Shorten an over-long lock filename deterministically via hashing."""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Hard file lock for Windows, using ``msvcrt.locking``.

    Renamed from the garbled ``A__`` to match the platform dispatch at the
    bottom of the module; attribute writes restored to ``self``.
    """

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        # The \\?\ prefix lifts the Windows MAX_PATH limit for this path.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Hard file lock for Unix systems, using ``fcntl.flock``.

    Renamed from the garbled ``A__`` to match the platform dispatch at the
    bottom of the module; attribute writes restored to ``self``.
    """

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        # The filesystem itself dictates the maximum filename length here.
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile: https://github.com/benediktschmitt/py-filelock/issues/31
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class A__ ( __snake_case ):
    """Lock based solely on the lock file's existence (transformed name for ``SoftFileLock``).

    NOTE(review): reconstructed — both hook methods were named
    ``__UpperCamelCase`` (one shadowed the other) and the fd bookkeeping was
    assigned to locals instead of ``self._lock_file_fd``. Assumes the unseen
    base class calls ``_acquire``/``_release`` as in py-filelock — confirm.
    """

    def _acquire( self ):
        '''Atomically create the lock file; its existence *is* the lock.'''
        # O_EXCL makes creation fail if the file already exists.
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass  # file exists: lock is held by someone else
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        '''Close the fd, reset the handle, then best-effort delete the lock file.'''
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Select the platform-appropriate lock implementation.
# NOTE(review): the machine transformation renamed the three lock classes
# above to ``A__`` and this alias to ``__lowerCamelCase``, so the names
# ``WindowsFileLock``/``UnixFileLock``/``SoftFileLock`` referenced here no
# longer resolve — reconcile with the original module before use.
__lowerCamelCase : Dict = None
if msvcrt:  # Windows: msvcrt-based locking
    __lowerCamelCase : Any = WindowsFileLock
elif fcntl:  # POSIX: flock-based locking
    __lowerCamelCase : Any = UnixFileLock
else:  # fallback relying only on the lock file's existence
    __lowerCamelCase : int = SoftFileLock
    if warnings is not None:
        warnings.warn("""only soft file lock is available""")
| 38
| 0
|
import math
class A__ :
    """Dense weighted digraph on nodes 0..n-1 with Floyd-Warshall shortest paths.

    ``self.w`` holds edge weights and ``self.dp`` the running shortest
    distances, both initialised to ``math.inf``. The transformed original
    referenced the undefined names ``__UpperCamelCase``/``n`` in ``__init__``,
    declared duplicate ``A_`` parameters (a SyntaxError) and assigned state to
    throwaway locals, so no instance attribute was ever set; all restored here.
    """

    def __init__( self , A_=0 ):  # a graph with Node 0,1,...,N-1
        self.n = A_
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(0 , A_ )] for i in range(0 , A_ )]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0 , A_ )] for i in range(0 , A_ )]
        for i in range(0 , A_ ):
            self.dp[i][i] = 0  # a node is at distance 0 from itself

    def add_edge( self , u , v , w ):
        '''Add (or overwrite) the directed edge u -> v with weight w.'''
        self.dp[u][v] = w

    def floyd_warshall( self ):
        '''Relax every node pair through every intermediate node k — O(n^3).'''
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )

    def show_min( self , u , v ):
        '''Return the computed minimum distance from u to v.'''
        return self.dp[u][v]
if __name__ == "__main__":
    # Demo: all-pairs shortest paths on a 5-node digraph. The transformation
    # assigned the graph to ``__lowerCamelCase`` while every later statement
    # used the then-undefined names ``Graph``/``graph``; fixed to use the
    # class defined above (transformed name ``A__``).
    graph = A__(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 706
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
) -> None:
    """Merge a question-encoder and a generator checkpoint into one RAG checkpoint.

    The transformed original named this function ``A_`` (while the CLI below
    calls ``consolidate``) and declared every parameter with the duplicate
    name ``_lowerCAmelCase`` — a SyntaxError; names are restored from the
    CLI flags that feed this function.

    Args:
        model_type: ``"rag_sequence"`` or ``"rag_token"``; selects the RAG class.
        generator_name_or_path: generator model identifier or path.
        question_encoder_name_or_path: question-encoder model identifier or path.
        dest_dir: ``pathlib.Path`` output directory.
        config_name_or_path: optional config source; defaults to the facebook
            base config matching ``model_type``.
        generator_tokenizer_name_or_path: defaults to ``generator_name_or_path``.
        question_encoder_tokenizer_name_or_path: defaults to
            ``question_encoder_name_or_path``.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check: the consolidated checkpoint must load back cleanly.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    # CLI entry point. The transformation assigned the parser, parsed args and
    # destination directory to ``__lowerCamelCase`` while later statements used
    # the then-undefined names ``parser``/``args``/``dest_dir``; restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 38
| 0
|
class A__ :
    """Levenshtein edit distance, both top-down (memoised) and bottom-up DP.

    Transformed name for ``EditDistance``. The transformation declared
    duplicate ``A_`` parameters (a SyntaxError), collapsed both word
    attributes into ``worda`` in the comparisons, and assigned all state to
    throwaway locals; the two public solvers and the private recursive helper
    are restored here with their original names (the demo below calls
    ``min_dist_top_down``/``min_dist_bottom_up``).
    """

    def __init__( self ):
        # Working state shared with the top-down solver.
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp( self , m , n ):
        '''Memoised distance between ``worda[:m+1]`` and ``wordb[:n+1]``.'''
        if m == -1:
            return n + 1  # worda exhausted: insert the remaining n+1 chars
        elif n == -1:
            return m + 1  # wordb exhausted: delete the remaining m+1 chars
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]

    def min_dist_top_down( self , worda , wordb ):
        '''Return the edit distance using recursion plus a memo table.'''
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )

    def min_dist_bottom_up( self , worda , wordb ):
        '''Return the edit distance using the classic (m+1) x (n+1) DP table.'''
        self.worda = worda
        self.wordb = wordb
        m = len(worda )
        n = len(wordb )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]
if __name__ == "__main__":
    # Interactive demo. The transformation assigned the solver and both input
    # strings to ``__lowerCamelCase`` while the f-strings used the undefined
    # name ``Sa`` twice; restored to two distinct variables.
    solver = A__()
    print("""****************** Testing Edit Distance DP Algorithm ******************""")
    print()
    Sa = input("""Enter the first string: """).strip()
    Sb = input("""Enter the second string: """).strip()
    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}""")
    print()
    print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 707
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
    """Builds a miniature ConvBERT config plus random inputs for the TF tests.

    NOTE(review): machine-transformed (originally ``TFConvBertModelTester``).
    Every parameter was renamed to the duplicate ``A_`` (a SyntaxError as
    written) and every ``self.<attr> = ...`` became a throwaway local named
    ``UpperCamelCase``, so right-hand sides below reference names that no
    longer exist. Trailing comments give the presumed original attribute name
    for each hard-coded value, inferred from the ``self.*`` reads in the
    methods further down — reconcile with the upstream HuggingFace test file.
    """
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
        '''Record the hard-coded miniature-model hyper-parameters.'''
        UpperCamelCase : Dict = parent
        UpperCamelCase : str = 13  # batch_size (presumed)
        UpperCamelCase : int = 7  # seq_length (presumed)
        UpperCamelCase : str = True  # is_training (presumed)
        UpperCamelCase : Dict = True  # use_input_mask (presumed)
        UpperCamelCase : str = True  # use_token_type_ids (presumed)
        UpperCamelCase : Tuple = True  # use_labels (presumed)
        UpperCamelCase : List[str] = 99  # vocab_size (presumed)
        UpperCamelCase : Optional[Any] = 384  # hidden_size (presumed)
        UpperCamelCase : Tuple = 2  # num_hidden_layers (presumed)
        UpperCamelCase : Union[str, Any] = 4  # num_attention_heads (presumed)
        UpperCamelCase : Dict = 37  # intermediate_size (presumed)
        UpperCamelCase : Any = "gelu"  # hidden_act (presumed)
        UpperCamelCase : List[Any] = 0.1  # hidden_dropout_prob (presumed)
        UpperCamelCase : int = 0.1  # attention_probs_dropout_prob (presumed)
        UpperCamelCase : Tuple = 512  # max_position_embeddings (presumed)
        UpperCamelCase : List[Any] = 16  # type_vocab_size (presumed)
        UpperCamelCase : int = 2  # type_sequence_label_size (presumed)
        UpperCamelCase : Dict = 0.02  # initializer_range (presumed)
        UpperCamelCase : Optional[Any] = 3  # num_labels (presumed)
        UpperCamelCase : List[Any] = 4  # num_choices (presumed)
        UpperCamelCase : Dict = 128  # ConvBERT-specific size — TODO confirm
        UpperCamelCase : Optional[Any] = 2  # ConvBERT-specific ratio — TODO confirm
        UpperCamelCase : Optional[int] = 9  # ConvBERT-specific kernel — TODO confirm
        UpperCamelCase : Optional[int] = 1
        UpperCamelCase : Union[str, Any] = None  # scope (presumed)
    def __UpperCamelCase( self ):
        '''Build a ConvBertConfig plus random ids/masks/labels tensors.'''
        UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : str = None
        if self.use_input_mask:
            UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : Tuple = None
        if self.use_token_type_ids:
            UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : List[Any] = None
        if self.use_labels:
            UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase : Any = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the base model's last_hidden_state shape (dict and list inputs).'''
        UpperCamelCase : str = TFConvBertModel(config=A_ )
        UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        UpperCamelCase : Optional[int] = [input_ids, input_mask]
        UpperCamelCase : Any = model(A_ )
        UpperCamelCase : int = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the masked-LM head's logits shape.'''
        UpperCamelCase : Tuple = TFConvBertForMaskedLM(config=A_ )
        UpperCamelCase : int = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Dict = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the sequence-classification head's logits shape.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : int = TFConvBertForSequenceClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the multiple-choice head: inputs are tiled per choice.'''
        UpperCamelCase : List[str] = self.num_choices
        UpperCamelCase : str = TFConvBertForMultipleChoice(config=A_ )
        UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Any = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : List[str] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the token-classification head's logits shape.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : str = TFConvBertForTokenClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : str = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Check the QA head's start/end logits shapes.'''
        UpperCamelCase : List[str] = TFConvBertForQuestionAnswering(config=A_ )
        UpperCamelCase : Union[str, Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Union[str, Any] = model(A_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __UpperCamelCase( self ):
        '''Repack prepare_config_and_inputs() into (config, inputs_dict).'''
        UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) : Optional[Any] = config_and_inputs
        UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
    """ConvBERT TF model test-suite (machine-transformed).

    NOTE(review): originally a ``TFConvBertModelTest`` mixing in the tester
    mixins (both base names were rewritten to the undefined ``__snake_case``),
    with distinct class attributes all renamed to the shadowing
    ``_UpperCAmelCase`` and undefined ``A_`` references left in the bodies —
    reconcile with the upstream HuggingFace test file before relying on names.
    """
    _UpperCAmelCase :Dict = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    _UpperCAmelCase :Optional[Any] = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _UpperCAmelCase :Any = False
    _UpperCAmelCase :int = False
    _UpperCAmelCase :str = False
    def __UpperCamelCase( self ):
        '''Create the model tester and a ConvBertConfig config tester.'''
        UpperCamelCase : Dict = TFConvBertModelTester(self )
        UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , hidden_size=37 )
    def __UpperCamelCase( self ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def __UpperCamelCase( self ):
        '''Exercise the base model check.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )
    def __UpperCamelCase( self ):
        '''Exercise the masked-LM head check.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A_ )
    def __UpperCamelCase( self ):
        '''Exercise the multiple-choice head check.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A_ )
    def __UpperCamelCase( self ):
        '''Exercise the question-answering head check.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A_ )
    def __UpperCamelCase( self ):
        '''Exercise the sequence-classification head check.'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A_ )
    def __UpperCamelCase( self ):
        '''Exercise the token-classification head check.'''
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A_ )
    @slow
    def __UpperCamelCase( self ):
        '''Round-trip each model through a TF SavedModel and re-check hidden
        states and attentions (note the num_attention_heads / 2 — ConvBERT
        halves the effective head count).'''
        UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Optional[Any] = True
        UpperCamelCase : Any = True
        if hasattr(A_ , "use_cache" ):
            UpperCamelCase : List[str] = True
        UpperCamelCase : List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Any = getattr(self.model_tester , "key_length" , A_ )
        for model_class in self.all_model_classes:
            UpperCamelCase : List[Any] = self._prepare_for_class(A_ , A_ )
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Optional[int] = len(model(A_ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(A_ , saved_model=A_ )
                UpperCamelCase : Union[str, Any] = os.path.join(A_ , "saved_model" , "1" )
                UpperCamelCase : Dict = tf.keras.models.load_model(A_ )
                UpperCamelCase : str = model(A_ )
                if self.is_encoder_decoder:
                    UpperCamelCase : Union[str, Any] = outputs["encoder_hidden_states"]
                    UpperCamelCase : Any = outputs["encoder_attentions"]
                else:
                    UpperCamelCase : Any = outputs["hidden_states"]
                    UpperCamelCase : List[str] = outputs["attentions"]
                self.assertEqual(len(A_ ) , A_ )
                UpperCamelCase : int = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(A_ ) , A_ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def __UpperCamelCase( self ):
        '''Smoke-test loading the public YituTech/conv-bert-base checkpoint.'''
        UpperCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(A_ )
    def __UpperCamelCase( self ):
        '''Verify attention outputs (shape, count, config toggles) per model class.'''
        UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Dict = True
        UpperCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "key_length" , A_ )
        UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , A_ )
        def check_decoder_attentions_output(A_ ):
            # Decoder attentions come in hidden/attention pairs, hence out_len % 2.
            UpperCamelCase : Optional[Any] = len(A_ )
            self.assertEqual(out_len % 2 , 0 )
            UpperCamelCase : Any = outputs.decoder_attentions
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(A_ ):
            UpperCamelCase : Dict = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            UpperCamelCase : Union[str, Any] = True
            UpperCamelCase : List[Any] = False
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            UpperCamelCase : List[str] = len(A_ )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            if self.is_encoder_decoder:
                UpperCamelCase : int = model_class(A_ )
                UpperCamelCase : Tuple = model(self._prepare_for_class(A_ , A_ ) )
                self.assertEqual(config.output_hidden_states , A_ )
                check_decoder_attentions_output(A_ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            UpperCamelCase : Tuple = True
            UpperCamelCase : int = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            # Check attention is always last and order is fine
            UpperCamelCase : Optional[int] = True
            UpperCamelCase : List[str] = True
            UpperCamelCase : Optional[int] = model_class(A_ )
            UpperCamelCase : Optional[Any] = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) )
            self.assertEqual(model.config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
@require_tf
class A__ ( unittest.TestCase ):
    """Slow integration test: real ConvBERT weights must reproduce known logits.

    NOTE(review): the transformed original assigned the model and its output
    to ``UpperCamelCase`` locals and then read the undefined names
    ``model``/``A_``; the local names are restored here.
    """

    @slow
    def __UpperCamelCase( self ):
        '''Run conv-bert-base on a fixed 6-token input and compare a 3x3 slice.'''
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # Hidden states must be (batch=1, seq_len=6, hidden=768).
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
                    [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
                    [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 38
| 0
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker.
SPIECE_UNDERLINE = "▁"

# NOTE(review): the transformation renamed all of these constants to
# ``__lowerCamelCase`` (each overwriting the last), which left the names
# referenced by the tokenizer class below (``SPIECE_UNDERLINE``,
# ``VOCAB_FILES_NAMES``, ...) undefined; the original names are restored here
# following the upstream ErnieM tokenizer module.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class A__ ( __A ):
    """SentencePiece-based ErnieM tokenizer (machine-transformed).

    NOTE(review): constructor parameters were all renamed to the duplicate
    ``A_`` (a SyntaxError as written) and method bodies assign to throwaway
    ``UpperCamelCase`` locals while reading the lost original names
    (``do_lower_case``, ``vocab_file``, ``text`` ...); class attributes were
    renamed to the shadowing ``_UpperCAmelCase``. Reconcile against the
    upstream HuggingFace ``tokenization_ernie_m.py`` before relying on it.
    """
    _UpperCAmelCase :List[str] = ["input_ids"]
    _UpperCAmelCase :Dict = VOCAB_FILES_NAMES
    _UpperCAmelCase :Tuple = PRETRAINED_INIT_CONFIGURATION
    _UpperCAmelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase :List[str] = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase :Tuple = RESOURCE_FILES_NAMES
    def __init__( self , A_ , A_=None , A_=False , A_="utf8" , A_="[UNK]" , A_="[SEP]" , A_="[PAD]" , A_="[CLS]" , A_="[MASK]" , A_ = None , **A_ , ):
        '''Load the SentencePiece model and, if given, an explicit vocab file.'''
        UpperCamelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , vocab_file=UpperCamelCase__ , encoding=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
        UpperCamelCase : str = do_lower_case
        UpperCamelCase : Optional[Any] = sentencepiece_model_ckpt
        UpperCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCamelCase__ )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            UpperCamelCase : Optional[int] = self.load_vocab(filepath=UpperCamelCase__ )
        else:
            # No explicit vocab: derive {piece: id} straight from SentencePiece.
            UpperCamelCase : Dict = {self.sp_model.id_to_piece(UpperCamelCase__ ): id for id in range(self.sp_model.get_piece_size() )}
        UpperCamelCase : Tuple = {v: k for k, v in self.vocab.items()}
    def __UpperCamelCase( self , A_ ):
        '''Map each produced token to its (start, end) character span in the text.'''
        if text is None:
            return None
        UpperCamelCase : Optional[Any] = self.tokenize(UpperCamelCase__ )
        UpperCamelCase : Dict = '', []
        # Normalise the text (NFKC + char substitutions) while recording, for
        # every normalised character, the index it came from in the raw text.
        for i, ch in enumerate(UpperCamelCase__ ):
            if ch in self.SP_CHAR_MAPPING:
                UpperCamelCase : Any = self.SP_CHAR_MAPPING.get(UpperCamelCase__ )
            else:
                UpperCamelCase : Tuple = unicodedata.normalize("NFKC" , UpperCamelCase__ )
            if self.is_whitespace(UpperCamelCase__ ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(UpperCamelCase__ ) )
        UpperCamelCase : Tuple = normalized_text, [], 0
        if self.do_lower_case:
            UpperCamelCase : int = text.lower()
        # Locate each token in the normalised text and translate back through
        # char_mapping to raw-text offsets.
        for token in split_tokens:
            if token[:1] == "▁":
                UpperCamelCase : Dict = token[1:]
            UpperCamelCase : List[str] = text[offset:].index(UpperCamelCase__ ) + offset
            UpperCamelCase : Tuple = start + len(UpperCamelCase__ )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            UpperCamelCase : List[Any] = end
        return token_mapping
    @property
    def __UpperCamelCase( self ):
        '''Vocabulary size.'''
        return len(self.vocab )
    def __UpperCamelCase( self ):
        '''Return the vocab merged with any added tokens.'''
        return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__( self ):
        '''Drop the unpicklable SentencePiece processor when pickling.'''
        UpperCamelCase : Optional[int] = self.__dict__.copy()
        UpperCamelCase : int = None
        return state
    def __setstate__( self , A_ ):
        '''Restore state and reload the SentencePiece model from its checkpoint.'''
        UpperCamelCase : List[Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            UpperCamelCase : Tuple = {}
        UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
    def __UpperCamelCase( self , A_ ):
        '''Apply the character-substitution table to the text.'''
        return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase__ , UpperCamelCase__ ) for c in text) )
    def __UpperCamelCase( self , A_ , A_=False , A_=64 , A_=0.1 ):
        '''SentencePiece-tokenize, then re-split pieces at CJK characters,
        punctuation, and digit/non-digit boundaries.'''
        if self.sp_model_kwargs.get("enable_sampling" ) is True:
            UpperCamelCase : int = True
        if self.sp_model_kwargs.get("alpha" ) is not None:
            UpperCamelCase : Tuple = self.sp_model_kwargs.get("alpha" )
        if self.sp_model_kwargs.get("nbest_size" ) is not None:
            UpperCamelCase : Dict = self.sp_model_kwargs.get("nbest_size" )
        if not enable_sampling:
            UpperCamelCase : Optional[int] = self.sp_model.EncodeAsPieces(UpperCamelCase__ )
        else:
            UpperCamelCase : Dict = self.sp_model.SampleEncodeAsPieces(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        UpperCamelCase : Any = []
        for pi, piece in enumerate(UpperCamelCase__ ):
            if piece == SPIECE_UNDERLINE:
                # A bare underline piece is only kept when the next piece does
                # not already start with it (and it is not the first piece).
                if not pieces[pi + 1].startswith(UpperCamelCase__ ) and pi != 0:
                    new_pieces.append(UpperCamelCase__ )
                    continue
                else:
                    continue
            UpperCamelCase : Optional[Any] = 0
            for i, chunk in enumerate(UpperCamelCase__ ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(UpperCamelCase__ ) or self.is_punct(UpperCamelCase__ ):
                    # CJK chars and punctuation become single-character pieces.
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(UpperCamelCase__ )
                    UpperCamelCase : List[Any] = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    # Split where a digit run starts.
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    UpperCamelCase : str = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    # Split where a digit run ends.
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    UpperCamelCase : List[Any] = i
            if len(UpperCamelCase__ ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def __UpperCamelCase( self , A_ ):
        '''Join tokens into a plain string, turning the ▁ marker back into spaces.'''
        UpperCamelCase : Optional[int] = ''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
        return out_string
    def __UpperCamelCase( self , A_ ):
        '''Convert ids to tokens, then tokens to a plain string.'''
        UpperCamelCase : Dict = self.convert_ids_to_tokens(UpperCamelCase__ )
        UpperCamelCase : int = ''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
        return out_string
    def __UpperCamelCase( self , A_ ):
        '''Token -> id, falling back to the unk token's id.'''
        return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
    def __UpperCamelCase( self , A_ ):
        '''Id -> token, falling back to the unk token.'''
        return self.reverse_vocab.get(UpperCamelCase__ , self.unk_token )
    def __UpperCamelCase( self , A_ , A_=None ):
        '''Build [CLS] A [SEP] (or [CLS] A [SEP] [SEP] B [SEP]) input ids.'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCamelCase : Optional[int] = [self.cls_token_id]
        UpperCamelCase : Optional[Any] = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
    def __UpperCamelCase( self , A_ , A_=None ):
        '''Pad offset mappings with (0, 0) entries for the special tokens.'''
        if offset_mapping_a is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
    def __UpperCamelCase( self , A_ , A_=None , A_=False ):
        '''Mask: 1 for special tokens, 0 for sequence tokens.'''
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
        return [1] + ([0] * len(UpperCamelCase__ )) + [1]
    def __UpperCamelCase( self , A_ , A_ = None ):
        '''Segment ids: 0 for sentence A (incl. specials), 1 for sentence B.'''
        if token_ids_a is None:
            # [CLS] X [SEP]
            return (len(UpperCamelCase__ ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(UpperCamelCase__ ) + 1) + [1] * (len(UpperCamelCase__ ) + 3)
    def __UpperCamelCase( self , A_ ):
        '''True for CJK unified ideographs (U+4E00..U+9FFF).'''
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def __UpperCamelCase( self , A_ ):
        '''True for ASCII letters.'''
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def __UpperCamelCase( self , A_ ):
        '''True for common ASCII/CJK punctuation characters.'''
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def __UpperCamelCase( self , A_ ):
        '''True for whitespace, including single chars in Unicode category Zs.'''
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(UpperCamelCase__ ) == 1:
            UpperCamelCase : int = unicodedata.category(UpperCamelCase__ )
            if cat == "Zs":
                return True
        return False
    def __UpperCamelCase( self , A_ ):
        '''Read a token-per-line vocab file into a {token: line_index} dict.'''
        UpperCamelCase : Optional[Any] = {}
        with io.open(UpperCamelCase__ , "r" , encoding="utf-8" ) as f:
            for index, line in enumerate(UpperCamelCase__ ):
                UpperCamelCase : Union[str, Any] = line.rstrip("\n" )
                UpperCamelCase : Tuple = int(UpperCamelCase__ )
        return token_to_idx
    def __UpperCamelCase( self , A_ , A_ = None ):
        '''Write the vocab file and the serialized SentencePiece model to disk.'''
        UpperCamelCase : Union[str, Any] = 0
        if os.path.isdir(UpperCamelCase__ ):
            UpperCamelCase : int = os.path.join(
                UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            UpperCamelCase : List[Any] = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
            # Tokens are written in index order; a gap means a corrupt vocab.
            for token, token_index in sorted(self.vocab.items() , key=lambda A_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    UpperCamelCase : str = token_index
                writer.write(token + "\n" )
                index += 1
        UpperCamelCase : Optional[Any] = os.path.join(UpperCamelCase__ , "sentencepiece.bpe.model" )
        with open(UpperCamelCase__ , "wb" ) as fi:
            UpperCamelCase : int = self.sp_model.serialized_model_proto()
            fi.write(UpperCamelCase__ )
        return (vocab_file,)
| 708
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __snake_case ):
    """Configuration class for a CamemBERT (RoBERTa-architecture) model.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    with the same name ``A_`` (a SyntaxError); meaningful names are restored
    from the attribute assignments in the body.
    """

    _UpperCAmelCase :Union[str, Any] = 'camembert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the transformer hyper-parameters and forward the special token
        ids to the base configuration class."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__ ( __snake_case ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __UpperCamelCase( self ):
        """Return the ONNX input spec; axis 1 is "choice" for multiple-choice tasks."""
        # BUG FIX(review): the original bound the axes dict to a lost name and
        # then referenced undefined `dynamic_axis` (NameError).
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 38
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class A__ ( a__ ):
    """Image processor: optionally resize, center-crop, rescale and normalize a
    batch of images and return them as a ``BatchFeature``.

    NOTE(review): the obfuscated original declared duplicate ``A_`` parameters
    (a SyntaxError) and referenced the undefined name ``lowerCamelCase_``;
    parameter and method names below are restored from the surviving attribute
    assignments and the ``self.<method>`` call sites in ``preprocess``.
    """

    _UpperCAmelCase :Any = ["pixel_values"]

    def __init__(
        self,
        do_resize = True,
        size = None,
        resample = PIL.Image.BICUBIC,
        do_center_crop = True,
        crop_size = None,
        rescale_factor = 1 / 255,
        do_rescale = True,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        **kwargs,
    ):
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs )
        # canonical 256 -> 224 resize/crop geometry when not overridden
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize( self , image , size , resample = PIL.Image.BICUBIC , data_format = None , **kwargs ):
        """Resize ``image`` to ``size`` (a dict with "height" and "width" keys)."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image , size , data_format = None , **kwargs ):
        """Center-crop ``image`` to ``size``."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs ):
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        resample = None,
        do_center_crop = None,
        crop_size = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transformations to ``images``; any per-call
        argument overrides the corresponding instance default."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # BUG FIX(review): was `do_resize and size is None or resample is None`,
        # which raised whenever resample was None even with do_resize disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 709
|
def A_ ( input_a , input_b ) -> int:
    """NOR gate: return 1 iff both inputs are 0, otherwise 0.

    NOTE(review): the obfuscated original declared both parameters with the
    same name (a SyntaxError) and compared the first input with itself.
    """
    return int(input_a == input_b == 0 )
def A_ ( ) -> None:
    """Print the truth table of a NOR gate to stdout."""
    # NOTE(review): the original called `nor_gate`, a name the obfuscation
    # erased from this file; a local equivalent keeps the function runnable.
    def nor_gate(input_a , input_b ):
        return int(input_a == input_b == 0 )
    print("Truth Table of NOR Gate:" )
    print("| Input 1 | Input 2 | Output |" )
    print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
    print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
    print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
    print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()  # NOTE(review): `main` is undefined here — the obfuscation renamed the entry point to `A_`
| 38
| 0
|
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def A_ ( _lowerCAmelCase ):
    """Build a VideoMAE configuration for the model named ``_lowerCAmelCase``.

    For fine-tuned checkpoints, label maps are downloaded from the hub.
    NOTE(review): the obfuscated original lost every assignment target; the
    ``config.<field>`` names below follow the standard VideoMAE configuration —
    confirm against the upstream conversion script.
    """
    config = VideoMAEConfig()
    # NOTE(review): `set_architecture_configs` exists in this file only under
    # an obfuscated name; it mutates `config` in place.
    set_architecture_configs(_lowerCAmelCase , config )
    if "finetuned" not in _lowerCAmelCase:
        config.use_mean_pooling = False  # presumably; TODO confirm field name
    if "finetuned" in _lowerCAmelCase:
        repo_id = "huggingface/label-files"
        if "kinetics" in _lowerCAmelCase:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in _lowerCAmelCase:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    return config
def A_ ( model_name , config ):
    """Set encoder/decoder sizes on ``config`` according to the size tag
    ("small"/"base"/"large"/"huge") found in ``model_name``.

    NOTE(review): the obfuscated original declared both parameters with one
    name (a SyntaxError) and lost every assignment target; the attribute names
    below follow the standard VideoMAE configuration fields.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def A_ ( name ) -> str:
    """Map an original VideoMAE checkpoint parameter name to its HF equivalent.

    Each rule rewrites ``name`` in place so later rules see the updated string.
    BUG FIX(review): the obfuscated original discarded every ``replace`` result,
    returning the input unchanged.
    """
    if "encoder." in name:
        name = name.replace("encoder." , "" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "videomae.embeddings.cls_token" )
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        name = name.replace("blocks" , "videomae.encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name and "bias" not in name:
        name = name.replace("attn" , "attention.self" )
    if "attn" in name:
        name = name.replace("attn" , "attention.attention" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        name = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        name = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        name = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight" , "videomae.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias" , "videomae.layernorm.bias" )
    if "head" in name and "decoder" not in name:
        name = name.replace("head" , "classifier" )
    return name
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
    # Splits fused qkv projection weights into separate query/key/value entries
    # and renames the remaining checkpoint keys to the HF layout.
    # NOTE(review): the obfuscation corrupted this block beyond a safe rewrite —
    # both parameters share one name (a SyntaxError) and every
    # `UpperCamelCase : ... =` line lost its real assignment target
    # (presumably `val`, `key_split`, `dim`, the layer index, and the
    # state-dict stores for the q/k/v slices). Only comments are added here.
    for key in orig_state_dict.copy().keys():
        UpperCamelCase : Any = orig_state_dict.pop(_lowerCAmelCase )  # NOTE(review): presumably `val = orig_state_dict.pop(key)` — confirm
        if key.startswith("encoder." ):
            UpperCamelCase : str = key.replace("encoder." , "" )  # NOTE(review): presumably rebinds `key`
        if "qkv" in key:
            UpperCamelCase : str = key.split("." )  # NOTE(review): presumably `key_split`
            if key.startswith("decoder.blocks" ):
                UpperCamelCase : Optional[int] = config.decoder_hidden_size  # NOTE(review): presumably `dim`
                UpperCamelCase : List[Any] = int(key_split[2] )  # NOTE(review): presumably the decoder layer index
                UpperCamelCase : List[Any] = "decoder.decoder_layers."  # NOTE(review): presumably `prefix`
                if "weight" in key:
                    UpperCamelCase : str = val[:dim, :]  # query slice
                    UpperCamelCase : str = val[dim : dim * 2, :]  # key slice
                    UpperCamelCase : Union[str, Any] = val[-dim:, :]  # value slice
            else:
                UpperCamelCase : Union[str, Any] = config.hidden_size
                UpperCamelCase : Tuple = int(key_split[1] )
                UpperCamelCase : Dict = "videomae.encoder.layer."
                if "weight" in key:
                    UpperCamelCase : List[str] = val[:dim, :]
                    UpperCamelCase : Optional[Any] = val[dim : dim * 2, :]
                    UpperCamelCase : List[Any] = val[-dim:, :]
        else:
            UpperCamelCase : int = val  # NOTE(review): presumably `orig_state_dict[rename_key(key)] = val`
    return orig_state_dict
def A_ ( ) -> list:
    """Download the demo video (a numpy archive of frames) from the hub and
    return its frames as a list of arrays."""
    # BUG FIX(review): the original bound the download path to a lost name and
    # then loaded the undefined `_lowerCAmelCase`.
    file_path = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
    video = np.load(file_path )
    return list(video )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
    # End-to-end conversion: build the config, download the original Google
    # Drive checkpoint, rename/split its weights, verify the logits against
    # hard-coded expected slices, then optionally save and push the model.
    # NOTE(review): the obfuscation corrupted this block beyond a safe rewrite —
    # all four parameters share one name (a SyntaxError; presumably
    # checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub), the
    # body references undefined names (`model_name`, `files`, `model`,
    # `outputs`, `logits`, `config`, ...), and most `UpperCamelCase : ... =`
    # lines lost their real assignment targets. Only comments are added here.
    UpperCamelCase : int = get_videomae_config(_lowerCAmelCase )
    if "finetuned" in model_name:
        UpperCamelCase : Tuple = VideoMAEForVideoClassification(_lowerCAmelCase )
    else:
        UpperCamelCase : str = VideoMAEForPreTraining(_lowerCAmelCase )
    # download original checkpoint, hosted on Google Drive
    UpperCamelCase : Tuple = "pytorch_model.bin"
    gdown.cached_download(_lowerCAmelCase , _lowerCAmelCase , quiet=_lowerCAmelCase )
    UpperCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
    if "model" in files:
        UpperCamelCase : Any = files["model"]
    else:
        UpperCamelCase : List[str] = files["module"]
    UpperCamelCase : List[str] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
    model.load_state_dict(_lowerCAmelCase )
    model.eval()
    # verify model on basic input
    UpperCamelCase : Tuple = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    UpperCamelCase : str = prepare_video()
    UpperCamelCase : Dict = image_processor(_lowerCAmelCase , return_tensors="pt" )
    if "finetuned" not in model_name:
        UpperCamelCase : Optional[Any] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
        UpperCamelCase : Union[str, Any] = torch.load(_lowerCAmelCase )
    UpperCamelCase : str = model(**_lowerCAmelCase )
    UpperCamelCase : Dict = outputs.logits
    UpperCamelCase : List[str] = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        UpperCamelCase : int = torch.Size([1, 400] )
        UpperCamelCase : int = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
    elif model_name == "videomae-small-finetuned-ssv2":
        UpperCamelCase : Dict = torch.Size([1, 174] )
        UpperCamelCase : Dict = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
    elif model_name == "videomae-base":
        UpperCamelCase : List[str] = torch.Size([1, 1408, 1536] )
        UpperCamelCase : Union[str, Any] = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
    elif model_name == "videomae-base-short":
        UpperCamelCase : Optional[Any] = torch.Size([1, 1408, 1536] )
        UpperCamelCase : Tuple = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        UpperCamelCase : Optional[int] = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
    elif model_name == "videomae-large":
        UpperCamelCase : int = torch.Size([1, 1408, 1536] )
        UpperCamelCase : int = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        UpperCamelCase : Any = torch.Size([1, 400] )
        UpperCamelCase : List[Any] = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        UpperCamelCase : Union[str, Any] = torch.Size([1, 400] )
        UpperCamelCase : Optional[int] = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        UpperCamelCase : List[str] = torch.Size([1, 400] )
        UpperCamelCase : Tuple = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
    elif model_name == "videomae-base-finetuned-kinetics":
        UpperCamelCase : str = torch.Size([1, 400] )
        UpperCamelCase : Optional[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
    elif model_name == "videomae-base-short-ssv2":
        UpperCamelCase : int = torch.Size([1, 1408, 1536] )
        UpperCamelCase : str = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        UpperCamelCase : Optional[Any] = torch.Size([1, 174] )
        UpperCamelCase : Dict = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
    elif model_name == "videomae-base-ssv2":
        UpperCamelCase : Optional[int] = torch.Size([1, 1408, 1536] )
        UpperCamelCase : Dict = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        UpperCamelCase : Optional[Any] = torch.Size([1, 174] )
        UpperCamelCase : Any = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
    else:
        raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
    else:
        print("Logits:" , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , _lowerCAmelCase , atol=1e-4 )
    print("Logits ok!" )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        UpperCamelCase : Optional[int] = outputs.loss
        assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-4 )
        print("Loss ok!" )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(_lowerCAmelCase )
        model.save_pretrained(_lowerCAmelCase )
    if push_to_hub:
        print("Pushing to the hub..." )
        model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the converter.
    # NOTE(review): obfuscation bound the parser/args to `__lowerCamelCase`
    # while the code below reads `parser`/`args`, and the converter is called
    # as `convert_videomae_checkpoint` although its def was renamed to `A_`.
    __lowerCamelCase : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
        type=str,
        help=(
            """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
            """ download link."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    __lowerCamelCase : List[Any] = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 710
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __snake_case ):
    """Processor wrapping a Blip image processor and an auto-tokenizer into one
    callable that accepts images and/or text.

    NOTE(review): the obfuscated original declared duplicate parameter names in
    ``__call__`` (a SyntaxError), dropped the assignment targets in
    ``__init__``, and collapsed the three ProcessorMixin class attributes onto
    one name; the standard ProcessorMixin names are restored below.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__( self , image_processor , tokenizer ):
        """Store both sub-processors; token type ids are disabled on the tokenizer."""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__(
        self,
        images = None,
        text = None,
        add_special_tokens = True,
        padding = False,
        truncation = None,
        max_length = None,
        stride = 0,
        pad_to_multiple_of = None,
        return_attention_mask = None,
        return_overflowing_tokens = False,
        return_special_tokens_mask = False,
        return_offsets_mapping = False,
        return_token_type_ids = False,
        return_length = False,
        verbose = True,
        return_tensors = None,
        **kwargs,
    ):
        """Tokenize ``text`` and/or preprocess ``images``; at least one of the
        two must be provided. Returns the tokenizer encoding when only text is
        given, otherwise the image features (merged with the text encoding)."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 38
| 0
|
'''simple docstring'''
class A__ :
    """A node of a doubly linked list: a payload plus previous/next pointers.

    NOTE(review): the obfuscated original declared three identically named
    parameters (a SyntaxError), lost the ``self.*`` assignment targets, and
    collapsed the three accessors onto one method name even though the list
    classes below call ``get_data``/``get_next``/``get_previous``; all are
    restored here.
    """

    def __init__( self , data , previous=None , next_node=None ):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__( self ):
        return F"""{self.data}"""

    def get_data( self ):
        """Return the stored payload."""
        return self.data

    def get_next( self ):
        """Return the successor node (or None)."""
        return self.next

    def get_previous( self ):
        """Return the predecessor node (or None)."""
        return self.previous
class A__ :
    """Forward iterator over a chain of nodes exposing ``get_data``/``get_next``.

    NOTE(review): the obfuscated original lost the ``self.current`` bindings
    and renamed ``__next__``, which broke iteration; both are restored.
    """

    def __init__( self , head ):
        self.current = head

    def __iter__( self ):
        return self

    def __next__( self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class A__ :
    """A doubly linked list built from ``Node`` objects.

    NOTE(review): the obfuscated original lost the ``self.head``/``self.tail``
    and pointer-rewiring assignment targets, and erased the method names even
    though in-class call sites use ``set_head``, ``insert_before_node``,
    ``insert_after_node``, ``get_node`` and ``remove_node_pointers``; names
    and assignments are restored from those call sites. ``Node`` and
    ``LinkedListIterator`` are the obfuscation-erased names of the two classes
    defined above in this file.
    """

    def __init__( self ):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__( self ):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )

    def __contains__( self , value ):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__( self ):
        return LinkedListIterator(self.head )

    def get_head_data( self ):
        """Return the payload of the first node, or None for an empty list."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data( self ):
        """Return the payload of the last node, or None for an empty list."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head( self , node ):
        """Make ``node`` the new head (also the tail when the list is empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )

    def set_tail( self , node ):
        """Make ``node`` the new tail."""
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )

    def insert( self , value ):
        """Append ``value`` to the list (or start the list with it)."""
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )

    def insert_before_node( self , node , node_to_insert ):
        """Link ``node_to_insert`` immediately before ``node``."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node( self , node , node_to_insert ):
        """Link ``node_to_insert`` immediately after ``node``."""
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position( self , position , value ):
        """Insert ``value`` at 1-based ``position``; append when out of range."""
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )

    def get_node( self , item ):
        """Return the first node holding ``item``; raise if absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found" )

    def delete_value( self , value ):
        """Unlink the first node holding ``value`` (head/tail updated as needed)."""
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )

    @staticmethod
    def remove_node_pointers( node ):
        """Detach ``node`` from its neighbours and clear its own pointers."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty( self ):
        """Return True when the list has no nodes."""
        return self.head is None
def A_ ( ) -> None:
    """Placeholder test hook kept for the doctest runner below; intentionally empty."""
    pass
if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest
    doctest.testmod()
| 711
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( __snake_case ):
    """Feature extractor that converts raw mono audio into padded log-mel
    spectrogram patches (``audio_values``) plus an attention mask
    (``audio_mask``).

    NOTE(review): the obfuscated original declared duplicate ``A_`` parameters
    (a SyntaxError) and dropped every assignment target; parameter, attribute
    and method names are restored from the surviving body references.
    """

    _UpperCAmelCase :Tuple = ['audio_values', 'audio_mask']

    def __init__( self , spectrogram_length=2048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44100 , hop_length_to_sampling_rate=86 , n_fft=2048 , padding_value=0.0 , **kwargs , ):
        """Precompute the mel filter bank and store the spectrogram geometry."""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # number of frequency patches per frame
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , ).T

    def _np_extract_fbank_features( self , waveform ):
        """Compute one waveform's log-mel spectrogram (dB, scaled into [-1, 1])."""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ):
        """Featurize ``raw_speech`` (one waveform or a batch) into padded
        log-mel spectrograms and, optionally, an attention mask."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    F""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
| 38
| 0
|
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
def A_ ( model_name ):
    """Build a MobileNetV1Config for `model_name` (e.g. ``mobilenet_v1_1.0_224``).

    Parses the depth multiplier and image size from the model name and
    installs the 1001-class ImageNet label maps (index 0 = "background").
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Fix: shift ids by one keyed on `k` (the mangled code indexed on an
    # undefined name instead of the dict key).
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def A_ ( ):
    """Download the standard COCO cats image used to sanity-check outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # Fix: the mangled version passed an undefined name as both the URL and
    # the `stream` flag.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def A_ ( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert a TensorFlow MobileNetV1 checkpoint to a 🤗 PyTorch model.

    Loads the TF weights, sanity-checks the logits on a test image, saves the
    model and image processor, and optionally pushes them to the hub.
    (The mangled original declared four parameters all named alike, which is
    a SyntaxError in Python.)
    """
    # NOTE(review): the config builder and `prepare_img` above are both bound
    # to the name `A_` in this mangled file; these helper names are unresolved
    # here — TODO restore the original module-level names.
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    # Fix: the mangled guard assigned the parser to a throwaway name and then
    # called `convert_movilevit_checkpoint`, which does not exist in this
    # file — the conversion routine is bound to the name `A_`.
    A_(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 712
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar("""KT""")
__lowerCamelCase : Dict = TypeVar("""VT""")
class A__ ( Generic[KT, VT] ):
    """A single skip-list node holding a key/value pair.

    ``forward[i]`` is the next node at level ``i``; the length of ``forward``
    is the node's height. (The mangled original declared two ``__init__``
    parameters with the same name — a SyntaxError — and assigned the fields
    to dead locals instead of ``self``.)
    """

    def __init__(self, key="root", value=None):
        self.key = key
        self.value = value
        # One successor reference per level this node participates in.
        self.forward = []

    def __repr__(self):
        return f"""Node({self.key}: {self.value})"""

    @property
    def level(self):
        """Height of the node — the skip-list code reads ``node.level``."""
        return len(self.forward)
class A__ ( Generic[KT, VT] ):
    """Probabilistic skip list mapping keys to values.

    NOTE(review): in this mangled file the node class is *also* named ``A__``
    and is shadowed by this class, so the ``Node`` references below are
    unresolved at runtime — the two classes need distinct names
    (e.g. ``Node``/``SkipList``). Method names are restored from their intact
    call sites (``self.random_level``, ``self._locate_node``,
    ``skip_list.insert/delete/find`` in the tests below).
    """

    def __init__(self, p=0.5, max_level=16):
        # Sentinel head; its forward list holds the entry point of each level.
        self.head = Node[KT, VT]()  # NOTE(review): `Node` — see class docstring
        self.level = 0
        self.p = p  # probability of promoting a node one extra level
        self.max_level = max_level

    def __str__(self):
        """ASCII diagram of the list, one row per node."""
        items = list(self)
        if len(items) == 0:
            return f"""SkipList(level={self.level})"""
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"""[{node.key}]""".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"""[{node.key}]""".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards))
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"""SkipList(level={self.level})\n""" + "\n".join(lines)

    def __iter__(self):
        """Yield keys in ascending order by walking level 0."""
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a node height: geometric distribution capped at max_level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return (node with `key` or None, per-level predecessors of `key`)."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially
            # have to be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key):
        """Remove `key` and rewire every level that referenced its node."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key, value):
        """Insert `key`->`value`, overwriting the value if the key exists."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)  # NOTE(review): `Node` — see class docstring
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key):
        """Return the value stored under `key`, or None if absent."""
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def A_ ( ) -> None:
    """Insert four keys and verify all are reachable by walking level 0."""
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    # Fix: the mangled code measured len() of an undefined name.
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def A_ ( ) -> None:
    """Re-inserting an existing key overrides its stored value."""
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    # Fix: the mangled code measured len() of an undefined name.
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def A_ ( ) -> List[Any]:
    """Searching an empty skip list returns None instead of raising."""
    empty_list = SkipList()
    assert empty_list.find("Some key") is None
def A_ ( ) -> Tuple:
    """find() returns the latest value per key and None for absent keys."""
    sl = SkipList()
    sl.insert("Key2", 20)
    assert sl.find("Key2") == 20
    for key, value in (("Some Key", 10), ("Key2", 8), ("V", 13)):
        sl.insert(key, value)
    assert sl.find("Y") is None
    assert sl.find("Key2") == 8
    assert sl.find("Some Key") == 10
    assert sl.find("V") == 13
def A_ ( ) -> Dict:
    """Deleting from an empty list is a no-op and leaves the head empty."""
    sl = SkipList()
    sl.delete("Some key")
    assert len(sl.head.forward) == 0
def A_ ( ) -> Dict:
    """Deleted keys are no longer found by find()."""
    sl = SkipList()
    for key, value in (("Key1", 12), ("V", 13), ("X", 14), ("Key2", 15)):
        sl.insert(key, value)
    sl.delete("V")
    sl.delete("Key2")
    assert sl.find("V") is None
    assert sl.find("Key2") is None
def A_ ( ) -> List[str]:
    """Deleting one key must not disturb lookups of the remaining keys."""
    sl = SkipList()
    for key, value in (("Key1", 12), ("V", 13), ("X", 14), ("Key2", 15)):
        sl.insert(key, value)
    # Delete keys one at a time; after each deletion every surviving key
    # must still resolve to its value and every deleted key to None.
    expected = {"V": 13, "X": 14, "Key1": 12, "Key2": 15}
    for doomed in ("V", "X", "Key1", "Key2"):
        sl.delete(doomed)
        expected[doomed] = None
        for key in ("V", "X", "Key1", "Key2"):
            if expected[key] is None:
                assert sl.find(key) is None
            else:
                assert sl.find(key) == expected[key]
def A_ ( ) -> None:
    """After a delete, no level may still reference the removed node."""
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        # Fix: the mangled helper yielded an undefined name instead of its
        # own parameter.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def A_ ( ) -> None:
    """Iteration yields keys in sorted order across inserts and deletes."""

    def is_sorted(lst):
        # Fix: the mangled predicate mixed an undefined name with its
        # parameter.
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def A_ ( ) -> Tuple:
    """Run every skip-list unit check 100 times.

    NOTE(review): the mangling renamed every helper in this file to ``A_``,
    so the ``test_*`` names called below are unresolved at runtime — TODO
    restore the original module-level function names.
    """
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def A_ ( ) -> None:
    """Demo: build a small skip list, delete a key, print the structure."""
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    # Fix: the mangled code printed an undefined name.
    print(skip_list)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the demo entry point above is bound to the name `A_` in
    # this mangled file; `main` does not exist here.
    A_()
| 38
| 0
|
def A_ ( min_val=10 , max_val=1000 , option=True ) -> int:
    """Return ``min_val`` when ``option`` is truthy, otherwise ``max_val``.

    Raises ValueError when the bounds are inverted. (The mangled original
    declared three parameters with the same name — a SyntaxError — and
    type-checked with undefined names.)
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def A_ ( number_a , number_b ) -> int:
    """Midpoint of two numbers, truncated toward zero."""
    # Fix: the mangled original duplicated the parameter name (SyntaxError)
    # and summed one operand with itself.
    return int((number_a + number_b) / 2)
def A_ ( lower , higher , to_guess ) -> None:
    """Binary-search ``to_guess`` inside (lower, higher), printing each guess.

    Raises ValueError for inverted bounds or an out-of-range target.
    (The mangled original declared three parameters with the same name —
    a SyntaxError.)
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        """Compare a candidate against the target."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        # Midpoint guess; a "low" answer means the guess is below the target,
        # so it becomes the new lower bound (and vice versa for "high").
        number = int((last_lowest + last_highest) / 2)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"""guess the number : {last_numbers[-1]}""")
    print(f"""details : {last_numbers!s}""")
def A_ ( ) -> None:
    """Prompt for bounds and a target, then run the guessing demo."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    # NOTE(review): the guessing routine above is also bound to the name
    # `A_` in this mangled file, so `guess_the_number` is unresolved here —
    # TODO restore the original module-level names.
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    A_()
| 713
|
from PIL import Image
def A_ ( image ) -> Image:
    """Binarize a grayscale PIL image around its mean intensity, in place.

    Pixels above the mean become 255, all others 0. The image is modified
    and also returned. (The mangled original iterated ``range(image)`` and
    mixed up the two axes between the passes.)
    """
    width, height = image.size
    mean = 0
    pixels = image.load()
    # First pass: accumulate the global mean. PIL pixel access is (x, y).
    for y in range(height):
        for x in range(width):
            mean += pixels[x, y]
    mean //= width * height
    # Second pass: threshold every pixel against the mean.
    for y in range(height):
        for x in range(width):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
if __name__ == "__main__":
    # NOTE(review): the threshold routine above is bound to the name `A_` in
    # this mangled file; the result was also assigned to a throwaway name
    # while `image.save` read an undefined `image`.
    image = A_(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 38
| 0
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A_ ( model ):
    """Return the total number of trainable parameters in ``model``.

    Counts only parameters with ``requires_grad`` set. (The mangled lambda
    named its parameter one thing and read another, and the module-level
    name ``model`` was undefined.)
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
__lowerCamelCase : int = logging.getLogger(__name__)
def A_ ( output_dir , metric ):
    """Build a ModelCheckpoint that keeps the best checkpoint by ``metric``.

    Supports rouge2 / bleu / em / loss; raises NotImplementedError otherwise.
    (The mangled original declared two parameters with the same name — a
    SyntaxError — and read undefined locals.)
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            " function.")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"""val_{metric}""", mode="max", save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def A_ ( metric , patience ):
    """Build an EarlyStopping callback on ``val_<metric>``.

    Minimizes when the metric name contains "loss", maximizes otherwise.
    (The mangled original duplicated the parameter name — a SyntaxError.)
    """
    return EarlyStopping(
        monitor=f"""val_{metric}""",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,  # presumably the original flag — TODO confirm
    )
class A__ ( pl.Callback ):
    """Lightning callback that logs learning rates and parameter counts, and
    writes seq2seq eval results/generations under the output directory.

    NOTE(review): the mangling renamed every method to the same dunder name
    (so only the last survived); hook names are restored from the
    pl.Callback API and the intact ``self._write_logs`` call site.
    """

    def on_batch_end(self, trainer, pl_module):
        """Log the per-optimizer-group learning rates after every batch."""
        lrs = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        """Dump callback metrics (and optionally generations) for `type_path`."""
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        """Log total and trainable parameter counts once at train start."""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        # NOTE(review): the counting helper above is bound to the name `A_`
        # in this mangled file, so `count_trainable_parameters` is unresolved.
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        """Persist the metrics json and write the test results/generations."""
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        """Persist the metrics json after every validation run."""
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 714
|
from math import loga
def A_ ( a ) -> int:
    """Return the 0-based position of the lowest set bit of ``a``.

    Returns 0 for ``a == 0``; raises ValueError for negatives and TypeError
    for non-int input. (The mangled original called ``isinstance(x, x)``,
    which itself raises, and read an undefined name.)
    """
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    # a & -a isolates the lowest set bit; log2 converts it to its index.
    return 0 if (a == 0) else int(loga(a & -a))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38
| 0
|
__lowerCamelCase : str = """Tobias Carryer"""
from time import time
class A__ :
    """Linear congruential pseudo-random number generator.

    Each call advances ``seed' = (multiplier * seed + increment) % modulo``.
    Not suitable for cryptographic use. (The mangled original duplicated the
    ``__init__`` parameter names — a SyntaxError — and dropped the field
    assignments; the method name is restored from the ``lcg.next_number()``
    call site below.)
    """

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """Store the LCG parameters; `seed` defaults to import-time wall clock."""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator and return the new seed."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
    # Show the LCG in action.
    # NOTE(review): the generator class above is named `A__` in this mangled
    # file; `LinearCongruentialGenerator` does not exist here.
    lcg = A__(166_4525, 10_1390_4223, 2 << 31)
    while True:
        print(lcg.next_number())
| 715
|
from __future__ import annotations
__lowerCamelCase : Optional[int] = """Muhammad Umer Farooq"""
__lowerCamelCase : Tuple = """MIT"""
__lowerCamelCase : Optional[int] = """1.0.0"""
__lowerCamelCase : int = """Muhammad Umer Farooq"""
__lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me"""
__lowerCamelCase : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class A__ ( __snake_case ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__()
UpperCamelCase : list[str] = []
UpperCamelCase : str = domain
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
UpperCamelCase : Any = parse.urljoin(self.domain , A_ )
self.urls.append(A_ )
def A_ ( url ) -> str:
    """Return the registrable (second-level) domain of ``url``,
    e.g. ``github.com`` for ``https://mail.github.com/x``."""
    # Fix: the original delegated to the undefined name
    # `get_sub_domain_name`; the netloc extraction is inlined instead.
    return ".".join(parse.urlparse(url).netloc.split(".")[-2:])
def A_ ( _lowerCAmelCase ) -> str:
    """Return the network-location component (host[:port]) of a URL."""
    parsed = parse.urlparse(_lowerCAmelCase)
    return parsed.netloc
def A_ ( url="https://github.com" ) -> list[str]:
    """Crawl ``url``, fetch every anchor target found on the page, and return
    the sorted, de-duplicated ``name@<domain>`` addresses discovered."""
    # NOTE(review): the helper functions above are all bound to the name `A_`
    # in this mangled file, so the domain extraction is inlined here.
    domain = ".".join(parse.urlparse(url).netloc.split(".")[-2:])
    # Initialize the parser (the link-collecting HTMLParser subclass above).
    parser = A__(url)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            try:
                # Fix: the original re-fetched the base URL here instead of
                # the discovered `link`.
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    # NOTE(review): the crawler above is bound to the name `A_` in this
    # mangled file; the result was also assigned to a throwaway name while
    # the prints read an undefined `emails`.
    emails = A_("https://github.com")
    print(f"""{len(emails)} emails found:""")
    print("\n".join(sorted(emails)))
| 38
| 0
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def A_ ( ) -> None:
    """Entry point of the ``diffusers-cli`` tool: build the argument parser,
    register the subcommands, and dispatch to the selected command."""
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
    # Register commands (fix: the mangled code passed undefined names here).
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    # NOTE(review): the entry point above is bound to the name `A_` in this
    # mangled file; `main` does not exist here.
    A_()
| 716
|
from __future__ import annotations
def A_ ( limit ) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below ``limit``.

    Requires ``limit >= 3``. (The mangled original read the undefined name
    ``limit`` and dropped the sieve's subscript writes.)
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Mark multiples of every odd candidate; even indices are never
    # collected below, so they need no marking.
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def A_ ( ceiling=100_0000 ) -> int:
    """Project Euler 50: the prime below ``ceiling`` expressible as the sum
    of the most consecutive primes.

    (The mangled original read the undefined names ``prime_sieve``,
    ``length`` and ``ceiling``; the sieve is nested here because the
    module-level helper's name was mangled away.)
    """

    def _prime_sieve(limit):
        # Standard Eratosthenes sieve returning primes strictly below limit.
        is_prime = [True] * limit
        is_prime[0] = is_prime[1] = False
        for i in range(2, int(limit**0.5) + 1):
            if is_prime[i]:
                for j in range(i * i, limit, i):
                    is_prime[j] = False
        return [i for i in range(2, limit) if is_prime[i]]

    primes = _prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership instead of scanning the list
    length = 0
    largest = 0
    for i in range(len(primes)):
        # Start at i + length: shorter runs can never beat the current best.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    # NOTE(review): the solver above is bound to the name `A_` in this
    # mangled file; `solution` does not exist here.
    print(f"""{A_() = }""")
| 38
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCamelCase : Tuple = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 717
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __snake_case ):
    """Dataset input stream that builds a Dataset from a Python generator.

    NOTE(review): the mangled original declared every ``__init__`` parameter
    with the same name — a SyntaxError — and assigned the download options to
    dead locals; parameter and method names are restored from the keyword
    arguments that survived the mangling (``read`` per the input-stream
    contract — TODO confirm against the abstract base).
    """

    def __init__(self, generator, features=None, cache_dir=None, keep_in_memory=False, streaming=False, gen_kwargs=None, num_proc=None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs)

    def read(self):
        """Build and return the dataset (streaming or map-style)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
| 38
| 0
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__lowerCamelCase : Optional[int] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__lowerCamelCase : Any = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A_ ( ):
    """Map every byte (0-255) to a printable unicode character.

    Bytes that are already printable map to themselves; the rest are shifted
    into the 256+ range so BPE never sees whitespace/control characters.
    (The mangled original built the working lists under throwaway names and
    appended undefined ones.)
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def A_ ( word ):
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is any sequence of symbols (typically a tuple of strings for
    BPE). (The mangled original read the undefined names ``word`` and
    ``prev_char``.)
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class A__ ( UpperCamelCase_ ):
_UpperCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES
_UpperCAmelCase :Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self , A_ , A_ , A_="replace" , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=False , **A_ , ):
'''simple docstring'''
UpperCamelCase : Optional[int] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
UpperCamelCase : List[str] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
UpperCamelCase : Optional[Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
UpperCamelCase : Tuple = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
UpperCamelCase : Optional[int] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
UpperCamelCase : Optional[int] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase : List[str] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding="utf-8" ) as vocab_handle:
UpperCamelCase : int = json.load(A_ )
UpperCamelCase : Tuple = {v: k for k, v in self.encoder.items()}
UpperCamelCase : Union[str, Any] = errors # how to handle errors in decoding
UpperCamelCase : Tuple = bytes_to_unicode()
UpperCamelCase : int = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding="utf-8" ) as merges_handle:
UpperCamelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
UpperCamelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase : List[Any] = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase : int = {}
UpperCamelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase : Union[str, Any] = re.compile(R"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase( self ):
'''simple docstring'''
return len(self.encoder )
def __UpperCamelCase( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase( self , token ):
    """Byte-pair-encode one pre-token; return its sub-words space-joined.

    Standard GPT-2 style BPE: repeatedly merge the lowest-ranked adjacent
    symbol pair (per self.bpe_ranks) until no known merge remains.  Results
    are memoized in self.cache.

    BUGFIX: the obfuscation collapsed every local to one name, leaving the
    body referencing undefined `token`/`word`/`first`/`second`; restored the
    canonical variable names so the algorithm actually runs.
    """
    if token in self.cache:
        return self.cache[token]
    word = tuple(token )
    pairs = get_pairs(word )
    if not pairs:
        return token
    while True:
        # lowest-ranked (most frequent) pair merges first
        bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
        if bigram not in self.bpe_ranks:
            break
        first , second = bigram
        new_word = []
        i = 0
        while i < len(word ):
            try:
                j = word.index(first , i )
            except ValueError:
                new_word.extend(word[i:] )
                break
            else:
                new_word.extend(word[i:j] )
                i = j
            if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                new_word.append(first + second )
                i += 2
            else:
                new_word.append(word[i] )
                i += 1
        word = tuple(new_word )
        if len(word ) == 1:
            break
        else:
            pairs = get_pairs(word )
    word = " ".join(word )
    self.cache[token] = word
    return word
def __UpperCamelCase( self , A_ ):
    """Tokenize a string into BPE sub-word tokens.

    BUGFIX: the byte-mapped token was assigned to an obfuscated throwaway
    name and the raw text was passed to self.bpe; restored the intended
    per-token pipeline.
    """
    bpe_tokens = []
    for token in re.findall(self.pat , A_ ):
        token = "".join(
            self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
        bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
    return bpe_tokens
def __UpperCamelCase( self , A_ ):
    """Convert a token (str) to its vocabulary id, falling back to the unk id."""
    unk_id = self.encoder.get(self.unk_token)
    return self.encoder.get(A_, unk_id)
def __UpperCamelCase( self , A_ ):
    """Convert a vocabulary id back to its token string (None if unknown)."""
    token = self.decoder.get(A_)
    return token
def __UpperCamelCase( self , A_ ):
    """Join BPE tokens and reverse the byte-level mapping back to a string.

    BUGFIX: the joined string was assigned to an obfuscated name while the
    next line read undefined `text`; restored the name.
    """
    text = "".join(A_ )
    # Reverse the byte->unicode mapping, then decode utf-8 using the
    # tokenizer's configured error policy.
    text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
    return text
def __UpperCamelCase( self , save_directory , filename_prefix = None ):
    """Write the vocab (json) and merges (txt) files to `save_directory`.

    Returns (vocab_file, merge_file) paths, or None if the directory is
    invalid.

    BUGFIX: the two parameters shared one obfuscated name (a SyntaxError)
    while the body already referenced `save_directory`/`filename_prefix`;
    also the merge-sort key lambda referenced `kv` without binding it.
    """
    if not os.path.isdir(save_directory ):
        logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
        return
    vocab_file = os.path.join(
        save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
    merge_file = os.path.join(
        save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
    with open(vocab_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
    index = 0
    with open(merge_file , "w" , encoding="utf-8" ) as writer:
        writer.write("#version: 0.2\n" )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
            if index != token_index:
                logger.warning(
                    F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                    " Please check that the tokenizer is not corrupted!" )
                index = token_index
            writer.write(" ".join(bpe_tokens ) + "\n" )
            index += 1
    return vocab_file, merge_file
def __UpperCamelCase( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
    """Return a mask with 1 for special tokens and 0 for sequence tokens.

    BUGFIX: all three parameters shared one obfuscated name (a SyntaxError);
    restored distinct names matching the body's references.
    """
    if already_has_special_tokens:
        # Delegate to the base implementation when ids already carry specials.
        return super().get_special_tokens_mask(
            token_ids_a , token_ids_b , already_has_special_tokens=True )
    if token_ids_b is None:
        return [1] + ([0] * len(token_ids_a )) + [1]
    return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
def __UpperCamelCase( self , token_ids_a , token_ids_b = None ):
    """Return all-zero token type ids (RoBERTa-style) for one or two sequences.

    BUGFIX: both parameters shared one obfuscated name (a SyntaxError);
    restored distinct names.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep ) * [0]
    return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def __UpperCamelCase( self , text , is_split_into_words=False , **kwargs ):
    """Optionally prepend a space so the first word is byte-level-BPE encoded
    like a mid-sentence word.

    BUGFIX: the parameters shared one obfuscated name (a SyntaxError) while
    the body already referenced `text`/`is_split_into_words`/`kwargs`.
    """
    add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
    if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
        text = " " + text
    return (text, kwargs)
def __UpperCamelCase( self , token_ids_a , token_ids_b = None ):
    """Append EOS to build a model input (second sequence intentionally unused).

    BUGFIX: both parameters shared one obfuscated name (a SyntaxError) and
    the body referenced undefined `token_ids_a`.
    """
    return token_ids_a + [self.eos_token_id]
def __UpperCamelCase( self , conversation ):
    """Flatten a Conversation into input ids, keeping only the most recent
    `model_max_length` tokens.

    BUGFIX: the parameter and every local were obfuscated away while the
    body referenced `conversation`/`inputs`/`input_ids`; restored them.
    """
    inputs = []
    for is_user, text in conversation.iter_texts():
        if is_user:
            # We need to space prefix as it's being done within blenderbot
            inputs.append(" " + text )
        else:
            # Generated responses should contain them already.
            inputs.append(text )
    full_string = " ".join(inputs )
    input_ids = self.encode(full_string )
    if len(input_ids ) > self.model_max_length:
        # Truncate from the left so the newest turns survive.
        input_ids = input_ids[-self.model_max_length :]
        logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
    return input_ids
| 718
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A_ ( _lowerCAmelCase ) -> Union[str, Any]:  # picklable for multiprocessing
    """Return the sum of the elements of the argument (e.g. a NumPy array).

    BUGFIX: the body referenced undefined `x` instead of the parameter.
    """
    return _lowerCAmelCase.sum()
def A_ ( _lowerCAmelCase ) -> Optional[Any]:  # picklable for multiprocessing
    """Return the argument incremented by one.

    BUGFIX: the body referenced undefined `i` instead of the parameter.
    """
    return _lowerCAmelCase + 1
@dataclass
class A__ :
    """Simple two-field record used by the asdict tests.

    BUGFIX: both fields previously shared one obfuscated name, so the second
    annotation silently replaced the first and the dataclass ended up with a
    single field; restored the two distinct fields (`x: int`, `y: str`) that
    the test code below constructs with ``x=... , y=...``.
    """
    x: int
    y: str
class A__ ( __snake_case ):
    # Unit tests for datasets.utils.py_utils helpers (map_nested, zip_dict,
    # temporary_assignment).  NOTE(review): the base class name is obfuscated —
    # presumably unittest.TestCase via the `TestCase` import above; confirm.
    def __UpperCamelCase( self ):
        '''map_nested maps a function over scalars, lists, dicts and nested
        containers, sequentially and with num_proc workers, and optionally over
        numpy arrays.  NOTE(review): the obfuscation collapsed every local to
        `UpperCamelCase`, so the expected/actual pairs below no longer refer to
        distinct variables — flagged, code left byte-identical.'''
        UpperCamelCase : Optional[int] = {}
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : List[Any] = 1
        UpperCamelCase : Tuple = [1, 2]
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]}
        UpperCamelCase : Any = {"a": {"1": 1}, "b": 2}
        UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4}
        UpperCamelCase : Dict = {}
        UpperCamelCase : Any = []
        UpperCamelCase : Any = 2
        UpperCamelCase : Any = [2, 3]
        UpperCamelCase : Optional[Any] = {"a": 2, "b": 3}
        UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]}
        UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3}
        UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
        # Sequential mapping over each container shape.
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        UpperCamelCase : List[str] = 2
        # Same shapes again, but with multiprocessing (num_proc=2).
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        # numpy mapping: map_numpy toggles whether arrays are mapped elementwise.
        UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        UpperCamelCase : int = {"a": 2, "b": 0, "c": 2}
        UpperCamelCase : Union[str, Any] = {
            "a": np.eye(2 ).astype(A_ ),
            "b": np.zeros(3 ).astype(A_ ),
            "c": np.ones(2 ).astype(A_ ),
        }
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        # NOTE(review): the lambda below references undefined `x` — obfuscation
        # residue of `lambda x: x + 1`.
        with self.assertRaises(A_ ):  # can't pickle a local lambda
            map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )
    def __UpperCamelCase( self ):
        '''zip_dict zips the values of several dicts key-wise.'''
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : List[Any] = {"a": 3, "b": 4}
        UpperCamelCase : Tuple = {"a": 5, "b": 6}
        UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
    def __UpperCamelCase( self ):
        '''temporary_assignment swaps an attribute for the duration of the
        with-block only.'''
        class A__ :
            # NOTE(review): inner class originally named `Foo` with attribute
            # `my_attr = "bar"`; the obfuscation renamed both, so the `Foo()`
            # call and `.my_attr` accesses below reference undefined names.
            _UpperCAmelCase :str = 'bar'
        UpperCamelCase : List[Any] = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(A_ , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def A_ ( iterable_length , num_proc , expected_num_proc ) -> Optional[Any]:
    """map_nested must stay single-process below parallel_min_length and cap
    the worker count at the iterable length otherwise.

    BUGFIX: the three parameters shared one obfuscated name (a SyntaxError)
    and the mapped lambda referenced undefined `x`; restored the names the
    parametrize string and the body already use.
    """
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data = {F"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A__ ( __snake_case ):
    # temp_seed must make TF / torch / numpy RNG reproducible inside the
    # with-block and leave it unseeded afterwards.  NOTE(review): locals were
    # obfuscated; `model`, `outa` and the `A_` arguments are residue of the
    # original names (model/inputs/out1/out2/out3) — flagged, code unchanged.
    @require_tf
    def __UpperCamelCase( self ):
        '''Same seed -> identical TF outputs; unseeded call -> different output.'''
        import tensorflow as tf
        from tensorflow.keras import layers
        UpperCamelCase : int = layers.Dense(2 )
        def gen_random_output():
            UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) )
            return model(A_ ).numpy()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : List[Any] = gen_random_output()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        UpperCamelCase : Optional[int] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    @require_torch
    def __UpperCamelCase( self ):
        '''Same seed -> identical torch outputs; unseeded call -> different output.'''
        import torch
        def gen_random_output():
            UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 )
            UpperCamelCase : Dict = torch.rand(1 , 3 )
            return model(A_ ).detach().numpy()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Optional[int] = gen_random_output()
        UpperCamelCase : List[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    def __UpperCamelCase( self ):
        '''Same seed -> identical numpy outputs; unseeded call -> different output.'''
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        UpperCamelCase : Optional[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def A_ ( input_data ) -> List[Any]:
    """NestedDataStructure must expose the wrapped object unchanged via .data.

    BUGFIX: the parameter was obfuscated while the body referenced
    `input_data` (the name the parametrize string requires anyway).
    """
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def A_ ( data , expected_output ) -> Tuple:
    """NestedDataStructure.flatten must yield the leaves in order for any nesting.

    BUGFIX: both parameters shared one obfuscated name (a SyntaxError);
    restored the names declared in the parametrize string.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def A_ ( ) -> List[Any]:
    """asdict converts dataclasses (possibly nested in dicts/lists) to plain
    dicts and raises for non-dataclass roots inside lists.

    NOTE(review): `A`, `expected_output` and `_lowerCAmelCase` are obfuscation
    residue — `A` presumably refers to the @dataclass defined near the top of
    this test module and the locals were originally named; verify before use.
    """
    UpperCamelCase : str = A(x=1 , y="foobar" )
    UpperCamelCase : Tuple = {"x": 1, "y": "foobar"}
    assert asdict(_lowerCAmelCase ) == expected_output
    UpperCamelCase : List[str] = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    UpperCamelCase : Tuple = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(_lowerCAmelCase ) == expected_output
    with pytest.raises(_lowerCAmelCase ):
        asdict([1, A(x=10 , y="foo" )] )
def A_ ( _lowerCAmelCase ) -> Tuple:
    """Split a string on whitespace (picklable helper for the pool tests).

    BUGFIX: the body referenced undefined `text` instead of the parameter.
    """
    return _lowerCAmelCase.split()
def A_ ( _lowerCAmelCase ) -> Dict:
    """Yield (timestamp, content) twice, two seconds apart — used to check that
    iflatmap_unordered yields items as soon as they are produced.

    BUGFIX: the body referenced undefined `content` instead of the parameter.
    """
    yield (time.time(), _lowerCAmelCase)
    time.sleep(2 )
    yield (time.time(), _lowerCAmelCase)
def A_ ( ) -> str:
    """iflatmap_unordered flattens pool results and yields them eagerly.

    BUGFIX: the obfuscation dropped the `pool`, `out` and `content` names the
    body clearly intends (`with Pool(2) as pool` followed by passing an
    undefined name to iflatmap_unordered); restored them.
    """
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
| 38
| 0
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
    # Config/inputs factory for the UperNet tests (obfuscated name; originally
    # `UperNetModelTester`).  NOTE(review): __init__ declares many parameters
    # all named `A_` — a SyntaxError introduced by the obfuscation; the
    # attribute assignments below show the intended parameter list.
    def __init__( self , A_ , A_=13 , A_=32 , A_=3 , A_=4 , A_=[10, 20, 30, 40] , A_=[2, 2, 3, 2] , A_=True , A_=True , A_=37 , A_="gelu" , A_=10 , A_=0.02 , A_=["stage2", "stage3", "stage4"] , A_=3 , A_=None , ):
        '''Store the test hyper-parameters (batch size, image size, backbone widths, ...).'''
        UpperCamelCase : Any = parent
        UpperCamelCase : Tuple = batch_size
        UpperCamelCase : List[str] = image_size
        UpperCamelCase : List[Any] = num_channels
        UpperCamelCase : Any = num_stages
        UpperCamelCase : int = hidden_sizes
        UpperCamelCase : Optional[Any] = depths
        UpperCamelCase : Tuple = is_training
        UpperCamelCase : Any = use_labels
        UpperCamelCase : Tuple = intermediate_size
        UpperCamelCase : List[Any] = hidden_act
        UpperCamelCase : Any = type_sequence_label_size
        UpperCamelCase : Optional[int] = initializer_range
        UpperCamelCase : Optional[Any] = out_features
        UpperCamelCase : Dict = num_labels
        UpperCamelCase : Union[str, Any] = scope
        UpperCamelCase : int = num_stages
    def __UpperCamelCase( self ):
        '''Build random pixel_values (and labels when use_labels) plus a config.'''
        UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase : Dict = None
        if self.use_labels:
            UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCamelCase : int = self.get_config()
        return config, pixel_values, labels
    def __UpperCamelCase( self ):
        '''Build a small ConvNext backbone config from the stored hyper-parameters.'''
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def __UpperCamelCase( self ):
        '''Build an UperNetConfig wrapping the backbone config.
        NOTE(review): `self.get_backbone_config` / `self.get_config` are
        undefined here because every method was renamed to `__UpperCamelCase`.'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=A_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=A_ , loss_ignore_index=255 , num_labels=self.num_labels , )
    def __UpperCamelCase( self , A_ , A_ , A_ ):
        '''Run the model in eval mode and check the logits shape.
        NOTE(review): the three parameters share one name (SyntaxError residue).'''
        UpperCamelCase : Union[str, Any] = UperNetForSemanticSegmentation(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Tuple = model(A_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def __UpperCamelCase( self ):
        '''Repackage prepare_config_and_inputs output as (config, inputs_dict).'''
        UpperCamelCase : int = self.prepare_config_and_inputs()
        # NOTE(review): annotated tuple-unpack below is invalid syntax — residue
        # of `(config, pixel_values, labels) = config_and_inputs`.
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) : Any = config_and_inputs
        UpperCamelCase : Any = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( lowercase_ , lowercase_ , unittest.TestCase ):
    # Model tests for UperNetForSemanticSegmentation.  NOTE(review): the class
    # attributes below were all renamed to `_UpperCAmelCase`, so each
    # assignment overwrites the previous one — originally these were
    # all_model_classes / pipeline_model_mapping / various boolean flags.
    _UpperCAmelCase :int = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _UpperCAmelCase :List[str] = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
    _UpperCAmelCase :List[Any] = False
    _UpperCAmelCase :Tuple = False
    _UpperCAmelCase :Any = False
    _UpperCAmelCase :Optional[Any] = False
    _UpperCAmelCase :Any = False
    _UpperCAmelCase :Optional[Any] = False
    def __UpperCamelCase( self ):
        '''Create the model tester and a ConfigTester.
        NOTE(review): `UperNetModelTester` is undefined here — the tester class
        above was renamed to `A__`.'''
        UpperCamelCase : int = UperNetModelTester(self )
        UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
    def __UpperCamelCase( self ):
        '''Exercise the standard config round-trip checks.'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def __UpperCamelCase( self ):
        '''Intentionally empty (common-properties check not applicable).'''
        return
    def __UpperCamelCase( self ):
        '''The forward signature must start with `pixel_values`.'''
        UpperCamelCase , UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase : str = model_class(A_ )
            UpperCamelCase : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase : Dict = [*signature.parameters.keys()]
            UpperCamelCase : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , A_ )
    def __UpperCamelCase( self ):
        '''Delegate the semantic-segmentation shape check to the model tester.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
    @unittest.skip(reason="UperNet does not use inputs_embeds" )
    def __UpperCamelCase( self ):
        '''Skipped: not applicable to UperNet.'''
        pass
    @unittest.skip(reason="UperNet does not support input and output embeddings" )
    def __UpperCamelCase( self ):
        '''Skipped: not applicable to UperNet.'''
        pass
    @unittest.skip(reason="UperNet does not have a base model" )
    def __UpperCamelCase( self ):
        '''Skipped: not applicable to UperNet.'''
        pass
    @unittest.skip(reason="UperNet does not have a base model" )
    def __UpperCamelCase( self ):
        '''Skipped: not applicable to UperNet.'''
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
    def __UpperCamelCase( self ):
        '''Skipped: incompatible with nn.DataParallel.'''
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __UpperCamelCase( self ):
        '''Skipped: pending smaller common-test model.'''
        pass
    def __UpperCamelCase( self ):
        '''Hidden states must have num_stages + 1 entries with 1/4-size maps.
        NOTE(review): the inner function declares three parameters sharing one
        name (SyntaxError residue), and `inputs_dict` below is undefined.'''
        def check_hidden_states_output(A_ , A_ , A_ ):
            UpperCamelCase : Any = model_class(A_ )
            model.to(A_ )
            model.eval()
            with torch.no_grad():
                UpperCamelCase : Tuple = model(**self._prepare_for_class(A_ , A_ ) )
            UpperCamelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCamelCase : Union[str, Any] = self.model_tester.num_stages
            self.assertEqual(len(A_ ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase : List[str] = True
            check_hidden_states_output(A_ , A_ , A_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCamelCase : List[Any] = True
            check_hidden_states_output(A_ , A_ , A_ )
    def __UpperCamelCase( self ):
        '''With zero-init config, every trainable parameter mean must be 0 or 1.'''
        UpperCamelCase , UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : List[str] = _config_zero_init(A_ )
        UpperCamelCase : Optional[Any] = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            UpperCamelCase : List[Any] = model_class(config=A_ )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    @unittest.skip(reason="UperNet does not have tied weights" )
    def __UpperCamelCase( self ):
        '''Skipped: not applicable to UperNet.'''
        pass
    @slow
    def __UpperCamelCase( self ):
        '''from_pretrained must load the published checkpoint without error.'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase : Tuple = UperNetForSemanticSegmentation.from_pretrained(A_ )
            self.assertIsNotNone(A_ )
def A_ ( ) -> Optional[Any]:
    """Download the ADE20k fixture image and return it as an RGB PIL image.

    BUGFIX: the downloaded path was assigned to an obfuscated name while
    Image.open referenced the undefined `UpperCamelCase__`; restored a real
    local so the path is actually opened.
    """
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
    image = Image.open(filepath ).convert("RGB" )
    return image
@require_torch
@require_vision
@slow
class A__ ( unittest.TestCase ):
    # Slow integration tests: run the published UperNet checkpoints on the
    # ADE20k fixture image and compare a 3x3 logits slice to recorded values.
    # NOTE(review): `prepare_img`, `processor`, `model`, `outputs` are
    # obfuscation residue — the helpers/locals were renamed, so several names
    # referenced below are undefined as written.
    def __UpperCamelCase( self ):
        '''Swin-tiny backbone: logits shape (1, num_labels, 512, 512) and slice match.'''
        UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
        UpperCamelCase : Tuple = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(A_ )
        UpperCamelCase : Any = prepare_img()
        UpperCamelCase : Optional[int] = processor(images=A_ , return_tensors="pt" ).to(A_ )
        with torch.no_grad():
            UpperCamelCase : Optional[int] = model(**A_ )
        UpperCamelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , A_ )
        UpperCamelCase : List[Any] = torch.tensor(
            [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(A_ )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , A_ , atol=1e-4 ) )
    def __UpperCamelCase( self ):
        '''ConvNext-tiny backbone: same shape check with its own expected slice.'''
        UpperCamelCase : Any = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
        UpperCamelCase : Dict = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(A_ )
        UpperCamelCase : Optional[Any] = prepare_img()
        UpperCamelCase : Tuple = processor(images=A_ , return_tensors="pt" ).to(A_ )
        with torch.no_grad():
            UpperCamelCase : List[Any] = model(**A_ )
        UpperCamelCase : str = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , A_ )
        UpperCamelCase : Tuple = torch.tensor(
            [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(A_ )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , A_ , atol=1e-4 ) )
| 719
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
    # Dummy placeholder that raises a helpful ImportError whenever it is
    # constructed while the `note_seq` backend is not installed.
    # Backend requirement list consumed by requires_backends.
    _UpperCAmelCase :Tuple = ['note_seq']
    def __init__( self , *A_ , **A_ ):
        '''Fail fast with an informative error if note_seq is unavailable.'''
        requires_backends(self , ["note_seq"] )
    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        '''Classmethod guard — NOTE(review): presumably `from_config`; name obfuscated.'''
        requires_backends(cls , ["note_seq"] )
    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        '''Classmethod guard — NOTE(review): presumably `from_pretrained`; name obfuscated.'''
        requires_backends(cls , ["note_seq"] )
| 38
| 0
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __snake_case , unittest.TestCase ):
    # Tokenization tests for LayoutLM, driven by the shared TokenizerTesterMixin
    # (the obfuscated base name).  NOTE(review): the class attributes were all
    # renamed to `_UpperCAmelCase`, so each assignment overwrites the previous
    # one — originally tokenizer_class / rust_tokenizer_class / two flags.
    _UpperCAmelCase :Tuple = LayoutLMTokenizer
    _UpperCAmelCase :Any = LayoutLMTokenizerFast
    _UpperCAmelCase :Optional[int] = True
    _UpperCAmelCase :List[Any] = True
    def __UpperCamelCase( self ):
        '''Write a small WordPiece vocab file into the test tmpdir.'''
        super().setUp()
        UpperCamelCase : Union[str, Any] = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def __UpperCamelCase( self , **A_ ):
        '''Instantiate a LayoutLMTokenizer from the tmpdir vocab.'''
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A_ )
    def __UpperCamelCase( self , A_ ):
        '''Return an (input, expected-clean-output) text pair for the mixin.'''
        UpperCamelCase : List[Any] = "UNwant\u00E9d,running"
        UpperCamelCase : Union[str, Any] = "unwanted, running"
        return input_text, output_text
    def __UpperCamelCase( self ):
        '''Full tokenization must produce the expected WordPiece pieces and ids.'''
        UpperCamelCase : List[str] = self.tokenizer_class(self.vocab_file )
        UpperCamelCase : Union[str, Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(A_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [7, 4, 5, 10, 8, 9] )
    def __UpperCamelCase( self ):
        '''Placeholder kept for the mixin's expected method set.'''
        pass
| 720
|
import math
import tensorflow as tf
from packaging import version
def A_ ( _lowerCAmelCase ) -> Any:
    """Exact GELU: x * Phi(x) computed with erf.

    BUGFIX: the converted tensor was bound to an obfuscated name while the
    formula read undefined `x`; restored the local.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def A_ ( _lowerCAmelCase ) -> Dict:
    """Tanh approximation of GELU (the GPT-2 "gelu_new" variant).

    BUGFIX: every local was bound to an obfuscated name while the formula
    read `x`, `pi` and `coeff`; restored the locals.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044_715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Mish activation: x * tanh(softplus(x)).

    BUGFIX: the converted tensor was bound to an obfuscated name while the
    return read undefined `x`; restored the local.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    return x * tf.tanh(tf.math.softplus(x ) )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Fast tanh-based GELU approximation:
    0.5 * x * (1 + tanh(x * 0.7978845608 * (1 + 0.044715 * x * x))).

    BUGFIX: the two coefficients were collapsed into one obfuscated name and
    the formula read an undefined `coeffa` twice; restored the two distinct
    coefficients per the canonical gelu_fast formula.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff1 = tf.cast(0.044_715 , x.dtype )
    coeff2 = tf.cast(0.7_978_845_608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x) ))
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Quick GELU: x * sigmoid(1.702 * x).

    BUGFIX: locals were bound to obfuscated names while the return read
    undefined `x` / `coeff`; restored them.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """GELU with outputs clipped to [-10, 10] (the "gelu_10" variant).

    NOTE(review): `_gelu` is undefined in this file as written — the helper
    above was renamed to `A_` by the obfuscation; confirm the target.
    """
    return tf.clip_by_value(_gelu(_lowerCAmelCase ) , -10 , 10 )
def A_ ( x , axis=-1 ) -> str:
    """Gated Linear Unit: split `x` in two halves along `axis` and gate the
    first half with sigmoid of the second.

    BUGFIX: both parameters shared one obfuscated name (a SyntaxError) and the
    split result was bound to one name while the return read undefined
    `a`/`b`; restored `a, b = tf.split(...)`.
    """
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
    def A_ ( _lowerCAmelCase ) -> Any:
        """Wrap Keras gelu with approximate=True so it matches the tanh variant.

        BUGFIX: `approximate=` previously received the input tensor itself
        instead of True.
        """
        return tf.keras.activations.gelu(_lowerCAmelCase , approximate=True )
    __lowerCamelCase : Optional[int] = tf.keras.activations.gelu
    # BUGFIX: previously referenced undefined `approximate_gelu_wrap`; the
    # wrapper above is (obfuscated as) `A_`.
    __lowerCamelCase : int = A_
else:
    # NOTE(review): `_gelu` / `_gelu_new` are undefined as written — the
    # helpers above were renamed to `A_` by the obfuscation.
    __lowerCamelCase : List[Any] = _gelu
    __lowerCamelCase : Optional[Any] = _gelu_new
# Name -> TF activation callable mapping (ACT2FN in the original module).
# NOTE(review): the obfuscation renamed every activation function above to
# `A_`, so the bare names referenced here (gelu, gelu_aa, gelu_fast, ...) are
# undefined in this file — restore the original function names before relying
# on this table.
__lowerCamelCase : Any = {
    """gelu""": gelu,
    """gelu_10""": gelu_aa,
    """gelu_fast""": gelu_fast,
    """gelu_new""": gelu_new,
    """glu""": glu,
    """mish""": mish,
    """quick_gelu""": quick_gelu,
    """relu""": tf.keras.activations.relu,
    """sigmoid""": tf.keras.activations.sigmoid,
    """silu""": tf.keras.activations.swish,
    """swish""": tf.keras.activations.swish,
    """tanh""": tf.keras.activations.tanh,
}
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Look up an activation function by name in the ACT2FN mapping.

    Raises KeyError listing the valid names when the string is unknown.

    BUGFIX: the body referenced undefined `activation_string` instead of the
    parameter.
    """
    if _lowerCAmelCase in ACTaFN:
        return ACTaFN[_lowerCAmelCase]
    else:
        raise KeyError(F"""function {_lowerCAmelCase} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
| 38
| 0
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def A_ ( tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ) -> Union[str, Any]:
    """Compute per-example max token lengths for the train and val splits and
    pickle them to each dataset's `len_file`.

    BUGFIX: all six parameters shared one obfuscated name (a SyntaxError) and
    every local was destroyed; restored the names the body references
    (`consider_target`, `max_lens`, `train_ds`, `val_ds`, ...).
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        # Stream the dataset in large batches and count non-pad tokens per row.
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
    # BUGFIX: previously referenced undefined `save_len_file`; the function
    # above is (obfuscated as) `A_`.
    fire.Fire(A_)
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :str = KandinskyVaaPipeline
_UpperCAmelCase :str = [
'image_embeds',
'negative_image_embeds',
]
_UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds']
_UpperCAmelCase :List[str] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase :List[str] = False
@property
def __UpperCamelCase( self ):
    '''Fixed test-sized dimension (32). NOTE(review): property name obfuscated — likely time_input_dim or text_embedder_hidden_size; confirm against upstream.'''
    return 32
@property
def __UpperCamelCase( self ):
    '''Fixed test-sized dimension (32). NOTE(review): property name obfuscated; confirm against upstream.'''
    return 32
@property
def __UpperCamelCase( self ):
    '''Alias of time_input_dim (equal width).'''
    return self.time_input_dim
@property
def __UpperCamelCase( self ):
    '''Four times time_input_dim (e.g. an embedding expansion width).'''
    return self.time_input_dim * 4
@property
def __UpperCamelCase( self ):
    '''Fixed constant 100. NOTE(review): property name obfuscated; confirm its role upstream.'''
    return 100
@property
def __UpperCamelCase( self ):
    '''Build a tiny seeded UNet2DConditionModel with image-embedding conditioning
    (Kandinsky v2.2 style).  NOTE(review): the kwargs dict is bound to an
    obfuscated name and `UNetaDConditionModel(**A_)` / `return model` reference
    undefined names — residue of `model = UNet2DConditionModel(**model_kwargs)`.'''
    torch.manual_seed(0 )
    UpperCamelCase : List[str] = {
        "in_channels": 4,
        # Out channels is double in channels because predicts mean and variance
        "out_channels": 8,
        "addition_embed_type": "image",
        "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
        "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
        "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
        "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
        "layers_per_block": 1,
        "encoder_hid_dim": self.text_embedder_hidden_size,
        "encoder_hid_dim_type": "image_proj",
        "cross_attention_dim": self.cross_attention_dim,
        "attention_head_dim": 4,
        "resnet_time_scale_shift": "scale_shift",
        "class_embed_type": None,
    }
    UpperCamelCase : Dict = UNetaDConditionModel(**A_ )
    return model
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.dummy_unet
UpperCamelCase : Optional[Any] = self.dummy_movq
UpperCamelCase : Dict = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
UpperCamelCase : Tuple = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCamelCase( self , A_ , A_=0 ):
'''simple docstring'''
UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
if str(A_ ).startswith("mps" ):
UpperCamelCase : Optional[Any] = torch.manual_seed(A_ )
else:
UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase : Optional[int] = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = "cpu"
UpperCamelCase : List[str] = self.get_dummy_components()
UpperCamelCase : Tuple = self.pipeline_class(**A_ )
UpperCamelCase : List[str] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) )
UpperCamelCase : Optional[int] = output.images
UpperCamelCase : int = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase : int = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration test: full Kandinsky 2.2 prior + decoder run,
    compared against a reference image fetched from the Hub.

    NOTE(review): machine-transformed file — locals are bound to
    ``UpperCamelCase`` while later lines read the original names
    (``pipe_prior``, ``pipeline``, ``output``); confirm against the original
    diffusers test.
    """
    def __UpperCamelCase( self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCamelCase( self ):
        """Generate a 512x512 'red cat' image and compare to the stored reference."""
        UpperCamelCase : Dict = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(A_ )
        UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
        UpperCamelCase : Tuple = pipeline.to(A_ )
        pipeline.set_progress_bar_config(disable=A_ )
        UpperCamelCase : str = "red cat, 4k photo"
        # Same seed for prior and decoder so the run is reproducible.
        UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 )
        UpperCamelCase , UpperCamelCase : Tuple = pipe_prior(
            A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 )
        UpperCamelCase : Tuple = pipeline(
            image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , )
        UpperCamelCase : Union[str, Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(A_ , A_ )
| 38
| 0
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __snake_case , unittest.TestCase ):
    """Tests for the slow and fast LED tokenizers (toy BPE vocab written to a
    temp dir, plus integration checks against ``allenai/led-base-16384``).

    NOTE(review): machine-transformed file — locals are bound to
    ``UpperCamelCase`` while later lines read the original names (``batch``,
    ``targets``, ``tokens_r`` ...); confirm against the original test file.
    """
    _UpperCAmelCase :str = LEDTokenizer
    _UpperCAmelCase :Union[str, Any] = LEDTokenizerFast
    _UpperCAmelCase :Tuple = True
    def __UpperCamelCase( self ):
        """Write a toy vocab + merges file into the test temp directory."""
        super().setUp()
        UpperCamelCase : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        UpperCamelCase : Union[str, Any] = dict(zip(A_ , range(len(A_ ) ) ) )
        UpperCamelCase : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        UpperCamelCase : Any = {"unk_token": "<unk>"}
        UpperCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(A_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(A_ ) )
    def __UpperCamelCase( self , **A_ ):
        """Return a slow tokenizer loaded from the temp dir."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ )
    def __UpperCamelCase( self , **A_ ):
        """Return a fast (Rust-backed) tokenizer loaded from the temp dir."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A_ )
    def __UpperCamelCase( self , A_ ):
        """Sample (input, output) text pair for the common tokenizer tests."""
        return "lower newer", "lower newer"
    @cached_property
    def __UpperCamelCase( self ):
        """Slow pretrained LED tokenizer for integration tests."""
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
    @cached_property
    def __UpperCamelCase( self ):
        """Fast pretrained LED tokenizer for integration tests."""
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
    @require_torch
    def __UpperCamelCase( self ):
        """Check padded batch shape and the expected token ids of sentence 1."""
        UpperCamelCase : List[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCamelCase : Optional[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase : Optional[Any] = tokenizer(A_ , max_length=len(A_ ) , padding=A_ , return_tensors="pt" )
            self.assertIsInstance(A_ , A_ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            UpperCamelCase : List[Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(A_ , A_ )
    @require_torch
    def __UpperCamelCase( self ):
        """Plain call must produce input_ids/attention_mask but no label keys."""
        UpperCamelCase : List[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase : Tuple = tokenizer(A_ , padding=A_ , return_tensors="pt" )
            self.assertIn("input_ids" , A_ )
            self.assertIn("attention_mask" , A_ )
            self.assertNotIn("labels" , A_ )
            self.assertNotIn("decoder_attention_mask" , A_ )
    @require_torch
    def __UpperCamelCase( self ):
        """``text_target`` with max_length padding yields fixed-width targets."""
        UpperCamelCase : List[Any] = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase : Union[str, Any] = tokenizer(text_target=A_ , max_length=32 , padding="max_length" , return_tensors="pt" )
            self.assertEqual(32 , targets["input_ids"].shape[1] )
    @require_torch
    def __UpperCamelCase( self ):
        """Overlong input is truncated to the model max length (5122)."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase : List[str] = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"] , padding=A_ , truncation=A_ , return_tensors="pt" )
            self.assertIsInstance(A_ , A_ )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
    @require_torch
    def __UpperCamelCase( self ):
        """Inputs and targets both start with BOS and end with EOS."""
        UpperCamelCase : Tuple = ["A long paragraph for summarization."]
        UpperCamelCase : List[str] = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase : Optional[int] = tokenizer(A_ , return_tensors="pt" )
            UpperCamelCase : Tuple = tokenizer(text_target=A_ , return_tensors="pt" )
            UpperCamelCase : str = inputs["input_ids"]
            UpperCamelCase : Any = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def __UpperCamelCase( self ):
        """``pad`` must propagate a caller-supplied global_attention_mask."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCamelCase : Any = ["Summary of the text.", "Another summary."]
            UpperCamelCase : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            UpperCamelCase : Dict = tokenizer(A_ , padding=A_ )
            UpperCamelCase : Union[str, Any] = [[0] * len(A_ ) for x in encoded_output["input_ids"]]
            UpperCamelCase : Tuple = tokenizer.pad(A_ )
            self.assertSequenceEqual(outputs["global_attention_mask"] , A_ )
    def __UpperCamelCase( self ):
        """Pretokenized inputs are not supported for LED — intentionally skipped."""
        pass
    def __UpperCamelCase( self ):
        """Slow and fast tokenizers must agree on a sentence containing <mask>."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
                UpperCamelCase : Tuple = self.tokenizer_class.from_pretrained(A_ , **A_ )
                UpperCamelCase : Optional[Any] = "A, <mask> AllenNLP sentence."
                UpperCamelCase : Union[str, Any] = tokenizer_r.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
                UpperCamelCase : Tuple = tokenizer_p.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                UpperCamelCase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                UpperCamelCase : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    A_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    A_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 700
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A_ ( ) -> Dict:
    """Parse command-line arguments for the TPU distributed-training launcher.

    Returns:
        The parsed ``argparse.Namespace`` with ``num_cores`` (int),
        ``training_script`` (str) and ``training_script_args`` (list of the
        remaining CLI tokens, captured via ``REMAINDER``).
    """
    # Bug fix: the result was never bound to ``parser`` and the argument
    # specs used the undefined name ``_lowerCAmelCase`` instead of the real
    # ``type``/``nargs`` values, so calling this raised NameError.
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program: everything after the script path.
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def A_ ( ) -> Optional[int]:
    """Launcher entry point: parse CLI args, import the training script as a
    module, patch ``sys.argv`` and spawn one process per TPU core via
    ``xmp.spawn`` (each process runs the script's ``_mp_fn``).

    NOTE(review): machine-transformed code — it calls ``parse_args()`` and
    reads ``args``/``script_fpath``/``mod``, but the assignments bind
    ``UpperCamelCase`` and the parser function above is named ``A_``; these
    names look undefined at runtime. Confirm against the original
    ``xla_spawn.py``.
    """
    UpperCamelCase : Tuple = parse_args()
    # Import training_script as a module.
    UpperCamelCase : Union[str, Any] = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    UpperCamelCase : List[Any] = script_fpath.stem
    UpperCamelCase : Optional[Any] = importlib.import_module(_lowerCAmelCase )
    # Patch sys.argv so the training script sees its own args plus the core count.
    UpperCamelCase : List[Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 38
| 0
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCamelCase : Dict = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class A__ ( unittest.TestCase ):
    """Runs the doctests embedded in transformers source files / docs.

    Currently skipped at the class level. NOTE(review): machine-transformed
    file — locals are bound to ``UpperCamelCase`` while later lines read the
    original names (``files``, ``module_identifier``, ``result`` ...).
    """
    def __UpperCamelCase( self , A_ , A_ = None , A_ = None , A_ = None , A_ = True , ):
        """Collect files in a directory (filtered by identifier /
        n_identifier / ignore_files) and run their doctests, either as
        imported modules (``only_modules``) or as doctest text files."""
        UpperCamelCase : Union[str, Any] = [file for file in os.listdir(A_ ) if os.path.isfile(os.path.join(A_ , A_ ) )]
        if identifier is not None:
            UpperCamelCase : int = [file for file in files if identifier in file]
        if n_identifier is not None:
            # n_identifier may be a single exclusion string or a list of them.
            if isinstance(A_ , A_ ):
                for n_ in n_identifier:
                    UpperCamelCase : Any = [file for file in files if n_ not in file]
            else:
                UpperCamelCase : Optional[int] = [file for file in files if n_identifier not in file]
        UpperCamelCase : Dict = ignore_files or []
        ignore_files.append("__init__.py" )
        UpperCamelCase : Tuple = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing" , A_ )
            if only_modules:
                UpperCamelCase : str = file.split("." )[0]
                try:
                    UpperCamelCase : Union[str, Any] = getattr(A_ , A_ )
                    UpperCamelCase : int = doctest.DocTestSuite(A_ )
                    UpperCamelCase : Union[str, Any] = unittest.TextTestRunner().run(A_ )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    # Not every file maps to an importable module attribute.
                    logger.info(F"""{module_identifier} is not a module.""" )
            else:
                UpperCamelCase : Tuple = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def __UpperCamelCase( self ):
        """Doctest all modeling files except the known-bad CTRL ones."""
        UpperCamelCase : int = Path("src/transformers" )
        UpperCamelCase : Dict = "modeling"
        UpperCamelCase : Union[str, Any] = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(A_ , identifier=A_ , ignore_files=A_ )
    def __UpperCamelCase( self ):
        """Doctest all tokenization files."""
        UpperCamelCase : Tuple = Path("src/transformers" )
        UpperCamelCase : Tuple = "tokenization"
        self.analyze_directory(A_ , identifier=A_ )
    def __UpperCamelCase( self ):
        """Doctest all configuration files."""
        UpperCamelCase : Optional[Any] = Path("src/transformers" )
        UpperCamelCase : Optional[int] = "configuration"
        self.analyze_directory(A_ , identifier=A_ )
    def __UpperCamelCase( self ):
        """Doctest everything that is not configuration/modeling/tokenization."""
        UpperCamelCase : Optional[int] = Path("src/transformers" )
        UpperCamelCase : Union[str, Any] = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(A_ , n_identifier=A_ )
    def __UpperCamelCase( self ):
        """Doctest the documentation sources (skipping the favicon)."""
        UpperCamelCase : List[str] = Path("docs/source" )
        UpperCamelCase : Optional[Any] = ["favicon.ico"]
        self.analyze_directory(A_ , ignore_files=A_ , only_modules=A_ )
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import setup for the VisionEncoderDecoder family: framework-specific
# modules are only imported when the corresponding backend is installed.
# NOTE(review): machine-transformed code — every availability branch rebinds
# ``__lowerCamelCase`` instead of adding keys to the import structure dict,
# and the final ``_LazyModule`` call references ``_import_structure``, which
# is never bound under that name here. Confirm against the original
# ``__init__.py``.
__lowerCamelCase : Union[str, Any] = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Dict = ["""VisionEncoderDecoderModel"""]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[str] = ["""TFVisionEncoderDecoderModel"""]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : int = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    # At runtime, replace this module with a lazy proxy.
    import sys
    __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 38
| 0
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger and the PhoBERT tokenizer resource tables: expected local
# file names, download URLs per checkpoint, and per-checkpoint max input
# lengths (in tokens).
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {
    """vocab_file""": """vocab.txt""",
    """merges_file""": """bpe.codes""",
}
__lowerCamelCase : str = {
    """vocab_file""": {
        """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
        """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
    },
    """merges_file""": {
        """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
        """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
    },
}
__lowerCamelCase : Dict = {
    """vinai/phobert-base""": 256,
    """vinai/phobert-large""": 256,
}
def A_ ( _lowerCAmelCase ) -> set:
    """Return the set of adjacent symbol pairs in a word.

    Args:
        _lowerCAmelCase: a word represented as a sequence of symbols
            (e.g. a string or a tuple of variable-length string tokens).

    Returns:
        Set of ``(previous_symbol, symbol)`` tuples; empty for a
        single-symbol word.
    """
    # Bug fix: the body read the undefined names ``word``/``prev_char``/
    # ``pairs`` instead of the parameter, raising NameError on every call.
    pairs = set()
    prev_char = _lowerCAmelCase[0]
    for char in _lowerCAmelCase[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return set(pairs )
class A__ ( __snake_case ):
    """PhoBERT tokenizer: BPE over pre-segmented Vietnamese text, with
    RoBERTa-style special tokens (<s> ... </s></s> ... </s>).

    NOTE(review): machine-transformed file — several methods declare two
    parameters both named ``A_`` (a SyntaxError as written) and local
    assignments bind ``UpperCamelCase`` while later lines read the original
    names. Confirm every method against the original
    ``tokenization_phobert.py`` before use.
    """
    _UpperCAmelCase :Optional[int] = VOCAB_FILES_NAMES
    _UpperCAmelCase :str = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , **A_ , ):
        """Load vocab + BPE merge files and build encoder/decoder/rank tables."""
        super().__init__(
            bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , **A_ , )
        UpperCamelCase : Any = vocab_file
        UpperCamelCase : Tuple = merges_file
        # Reserve ids 0-3 for the special tokens.
        UpperCamelCase : str = {}
        UpperCamelCase : Union[str, Any] = 0
        UpperCamelCase : Dict = 1
        UpperCamelCase : Dict = 2
        UpperCamelCase : Optional[Any] = 3
        self.add_from_file(A_ )
        UpperCamelCase : str = {v: k for k, v in self.encoder.items()}
        with open(A_ , encoding="utf-8" ) as merges_handle:
            UpperCamelCase : Optional[Any] = merges_handle.read().split("\n" )[:-1]
        UpperCamelCase : Union[str, Any] = [tuple(merge.split()[:-1] ) for merge in merges]
        UpperCamelCase : List[Any] = dict(zip(A_ , range(len(A_ ) ) ) )
        # BPE cache: token string -> merged output.
        UpperCamelCase : Union[str, Any] = {}
    def __UpperCamelCase( self , A_ , A_ = None ):
        """Add special tokens: <s> X </s> or <s> A </s></s> B </s>."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCamelCase : Optional[int] = [self.cls_token_id]
        UpperCamelCase : str = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def __UpperCamelCase( self , A_ , A_ = None , A_ = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
        if token_ids_a is None:
            return [1] + ([0] * len(A_ )) + [1]
        return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
    def __UpperCamelCase( self , A_ , A_ = None ):
        """Token type ids — all zeros, as PhoBERT does not use them."""
        UpperCamelCase : Tuple = [self.sep_token_id]
        UpperCamelCase : Any = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def __UpperCamelCase( self ):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder )
    def __UpperCamelCase( self ):
        """Full vocabulary including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )
    def __UpperCamelCase( self , A_ ):
        """Apply BPE merges to one token; results are memoised in self.cache."""
        if token in self.cache:
            return self.cache[token]
        UpperCamelCase : Tuple = tuple(A_ )
        # Mark the word end with </w> on the last symbol.
        UpperCamelCase : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        UpperCamelCase : str = get_pairs(A_ )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            UpperCamelCase : Any = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCamelCase : Any = bigram
            UpperCamelCase : Optional[Any] = []
            UpperCamelCase : List[Any] = 0
            while i < len(A_ ):
                try:
                    UpperCamelCase : Tuple = word.index(A_ , A_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCamelCase : int = j
                if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCamelCase : int = tuple(A_ )
            UpperCamelCase : Optional[int] = new_word
            if len(A_ ) == 1:
                break
            else:
                UpperCamelCase : Optional[Any] = get_pairs(A_ )
        # Join sub-words with the @@ continuation marker and drop trailing </w>.
        UpperCamelCase : Union[str, Any] = "@@ ".join(A_ )
        UpperCamelCase : str = word[:-4]
        UpperCamelCase : str = word
        return word
    def __UpperCamelCase( self , A_ ):
        """Split text on whitespace and BPE-encode each chunk."""
        UpperCamelCase : Dict = []
        UpperCamelCase : Optional[Any] = re.findall(R"\S+\n?" , A_ )
        for token in words:
            split_tokens.extend(list(self.bpe(A_ ).split(" " ) ) )
        return split_tokens
    def __UpperCamelCase( self , A_ ):
        """Token string -> id (unk id for unknown tokens)."""
        return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
    def __UpperCamelCase( self , A_ ):
        """Id -> token string (unk token for unknown ids)."""
        return self.decoder.get(A_ , self.unk_token )
    def __UpperCamelCase( self , A_ ):
        """Join tokens and undo the @@ continuation markers."""
        UpperCamelCase : List[str] = " ".join(A_ ).replace("@@ " , "" ).strip()
        return out_string
    def __UpperCamelCase( self , A_ , A_ = None ):
        """Copy vocab and merges files into ``save_directory``."""
        if not os.path.isdir(A_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCamelCase : Dict = os.path.join(
            A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase : Union[str, Any] = os.path.join(
            A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
            copyfile(self.vocab_file , A_ )
        if os.path.abspath(self.merges_file ) != os.path.abspath(A_ ):
            copyfile(self.merges_file , A_ )
        return out_vocab_file, out_merge_file
    def __UpperCamelCase( self , A_ ):
        """Populate the encoder from a fairseq-style '<token> <count>' dict file
        (accepts a path or an open file object).

        NOTE(review): as written the final assignment binds a local instead of
        ``self.encoder[word]`` — the encoder would never be populated; confirm
        against the original implementation.
        """
        if isinstance(A_ , A_ ):
            try:
                with open(A_ , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(A_ )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
            return
        UpperCamelCase : Optional[int] = f.readlines()
        for lineTmp in lines:
            UpperCamelCase : str = lineTmp.strip()
            UpperCamelCase : Optional[Any] = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            UpperCamelCase : List[Any] = line[:idx]
            UpperCamelCase : int = len(self.encoder )
| 702
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=3 , A_=10 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=None , ):
'''simple docstring'''
UpperCamelCase : Optional[int] = size if size is not None else {"shortest_edge": 18}
UpperCamelCase : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCamelCase : Optional[Any] = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : List[Any] = num_channels
UpperCamelCase : Union[str, Any] = num_frames
UpperCamelCase : Any = image_size
UpperCamelCase : Tuple = min_resolution
UpperCamelCase : Optional[Any] = max_resolution
UpperCamelCase : Any = do_resize
UpperCamelCase : Tuple = size
UpperCamelCase : List[Any] = do_normalize
UpperCamelCase : Optional[int] = image_mean
UpperCamelCase : Any = image_std
UpperCamelCase : str = crop_size
def __UpperCamelCase( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A__ ( __snake_case , unittest.TestCase ):
    """Tests for ``VivitImageProcessor``: attribute presence, dict round-trip,
    and output pixel-value shapes for PIL / numpy / torch video inputs.

    NOTE(review): machine-transformed file — locals are bound to
    ``UpperCamelCase`` while later lines read the original names
    (``image_processor``, ``video_inputs``, ``encoded_videos``), and the
    tester class referenced in ``setUp`` is named ``A__`` in this file.
    """
    _UpperCAmelCase :List[str] = VivitImageProcessor if is_vision_available() else None
    def __UpperCamelCase( self ):
        """Create the shared tester that supplies processor kwargs and inputs."""
        UpperCamelCase : List[Any] = VivitImageProcessingTester(self )
    @property
    def __UpperCamelCase( self ):
        """Constructor kwargs for the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def __UpperCamelCase( self ):
        """The processor exposes all expected configuration attributes."""
        UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A_ , "image_mean" ) )
        self.assertTrue(hasattr(A_ , "image_std" ) )
        self.assertTrue(hasattr(A_ , "do_normalize" ) )
        self.assertTrue(hasattr(A_ , "do_resize" ) )
        self.assertTrue(hasattr(A_ , "do_center_crop" ) )
        self.assertTrue(hasattr(A_ , "size" ) )
    def __UpperCamelCase( self ):
        """``from_dict`` honours both stored sizes and keyword overrides."""
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
    def __UpperCamelCase( self ):
        """PIL video inputs produce (batch, frames, channels, H, W) tensors."""
        UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        UpperCamelCase : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : str = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def __UpperCamelCase( self ):
        """numpy video inputs produce the same output shapes as PIL inputs."""
        UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , np.ndarray )
        # Test not batched input
        UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def __UpperCamelCase( self ):
        """torch video inputs produce the same output shapes as PIL inputs."""
        UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 38
| 0
|
from math import factorial, pi
def A_ ( theta , accuracy = 30 ) -> float:
    """Approximate sin(theta) with a Maclaurin series of ``accuracy`` terms.

    ``theta`` is reduced modulo 2*pi first so the series converges quickly.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms; must be a positive int.

    Raises:
        ValueError: if ``theta`` is not a real number or ``accuracy`` is not
            a positive int.
    """
    # Bug fix: both parameters were named ``_lowerCAmelCase`` (duplicate
    # parameter names are a SyntaxError) and the accuracy check compared a
    # value against itself; the intended names/checks are restored.
    if not isinstance(theta , (int, float) ):
        raise ValueError("maclaurin_sin() requires either an int or float for theta" )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
    theta = float(theta )
    # Range reduction: bring theta into [0, 2*pi).
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def A_ ( theta , accuracy = 30 ) -> float:
    """Approximate cos(theta) with a Maclaurin series of ``accuracy`` terms.

    ``theta`` is reduced modulo 2*pi first so the series converges quickly.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms; must be a positive int.

    Raises:
        ValueError: if ``theta`` is not a real number or ``accuracy`` is not
            a positive int.
    """
    # Bug fix: both parameters were named ``_lowerCAmelCase`` (duplicate
    # parameter names are a SyntaxError) and the accuracy check compared a
    # value against itself; the intended names/checks are restored.
    if not isinstance(theta , (int, float) ):
        raise ValueError("maclaurin_cos() requires either an int or float for theta" )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
    theta = float(theta )
    # Range reduction: bring theta into [0, 2*pi).
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
    # Demo / self-check when run as a script.
    import doctest
    doctest.testmod()
    # NOTE(review): the functions above are both defined as ``A_`` (the second
    # definition shadows the first), so the ``maclaurin_sin``/``maclaurin_cos``
    # names used below are undefined in this transformed file — confirm the
    # intended function names.
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 703
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__lowerCamelCase : Dict = logging.get_logger(__name__)
# Expected on-disk filenames for the tokenizer assets.
# NOTE(review): the three constants below are each bound to the throwaway name
# `__lowerCamelCase` (each assignment shadows the previous one), while the
# class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — names never defined here; the
# identifiers look machine-mangled and should be restored.
__lowerCamelCase : Union[str, Any] = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}
# Remote asset locations for each checkpoint.
__lowerCamelCase : Dict = {
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}
# Maximum model input length per checkpoint.
__lowerCamelCase : Tuple = {
    """facebook/blenderbot_small-90M""": 512,
}
class A__ ( __snake_case ):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE model.

    NOTE(review): the original bound all four class attributes to one name and
    defined both helper methods with duplicate parameter names (a SyntaxError);
    restored to the names the PreTrainedTokenizerFast machinery reads.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs ):
        """Build the backing ByteLevelBPETokenizer and forward special tokens."""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """Wrap one sequence in BOS/EOS; join a pair with an extra EOS."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        """BlenderbotSmall does not use token types: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
| 38
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A__ ( unittest.TestCase ):
    """Supplies config dicts and expected output sizes for DetaImageProcessor tests.

    NOTE(review): the original declared every __init__ parameter with the same
    name (a SyntaxError) and bound results to throwaway locals while later
    code reads `self.*` attributes and calls `prepare_image_processor_dict` /
    `get_expected_values`; restored to those names.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=None , image_std=None , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        """Store the test configuration; None defaults avoid mutable-default sharing."""
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict( self ):
        """Return the kwargs dict used to construct a DetaImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values( self , image_inputs , batched=False ):
        """Compute the (height, width) the processor should produce.

        For a single image, scale the shorter edge to size["shortest_edge"];
        for a batch, take the per-image values and pad to the maxima.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                # PIL reports (width, height).
                w, h = image.size
            else:
                # Arrays/tensors are channels-first: (C, H, W).
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            # Batched output is padded to the largest height and width seen.
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A__ ( __snake_case , unittest.TestCase ):
    """Tests for DetaImageProcessor: config round-trips, PIL/numpy/torch inputs,
    and COCO detection / panoptic annotation encoding.

    NOTE(review): throughout this class, results are bound to a throwaway
    annotated local named `UpperCamelCase` while following lines read names
    (`self.image_processor_tester`, `encoded_images`, `encoding`, ...) that are
    never assigned, and assertions reference an undefined `A_`. The
    identifiers look machine-mangled; restore the intended bindings before
    running these tests.
    """

    # Processor class under test; None when the vision stack is unavailable.
    _UpperCAmelCase :List[str] = DetaImageProcessor if is_vision_available() else None
    def __UpperCamelCase( self ):
        '''Create the tester that supplies configs and expected output sizes.'''
        UpperCamelCase : str = DetaImageProcessingTester(self )
    @property
    def __UpperCamelCase( self ):
        '''Image-processor config dict used to build processors in the tests.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def __UpperCamelCase( self ):
        '''The processor exposes all expected configuration attributes.'''
        UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A_ , "image_mean" ) )
        self.assertTrue(hasattr(A_ , "image_std" ) )
        self.assertTrue(hasattr(A_ , "do_normalize" ) )
        self.assertTrue(hasattr(A_ , "do_resize" ) )
        self.assertTrue(hasattr(A_ , "do_rescale" ) )
        self.assertTrue(hasattr(A_ , "do_pad" ) )
        self.assertTrue(hasattr(A_ , "size" ) )
    def __UpperCamelCase( self ):
        '''from_dict builds a processor with the documented default sizes.'''
        UpperCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , A_ )
    def __UpperCamelCase( self ):
        '''Placeholder (intentionally empty).'''
        pass
    def __UpperCamelCase( self ):
        '''Processing PIL images yields correctly shaped pixel_values.'''
        UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
        for image in image_inputs:
            self.assertIsInstance(A_ , Image.Image )
        # Test not batched input
        UpperCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(A_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
        UpperCamelCase : Optional[int] = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def __UpperCamelCase( self ):
        '''Processing numpy arrays yields correctly shaped pixel_values.'''
        UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
        for image in image_inputs:
            self.assertIsInstance(A_ , np.ndarray )
        # Test not batched input
        UpperCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCamelCase : Any = self.image_processor_tester.get_expected_values(A_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCamelCase : List[str] = image_processing(A_ , return_tensors="pt" ).pixel_values
        UpperCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def __UpperCamelCase( self ):
        '''Processing torch tensors yields correctly shaped pixel_values.'''
        UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
        for image in image_inputs:
            self.assertIsInstance(A_ , torch.Tensor )
        # Test not batched input
        UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(A_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCamelCase : int = image_processing(A_ , return_tensors="pt" ).pixel_values
        UpperCamelCase : str = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def __UpperCamelCase( self ):
        '''COCO detection annotations are encoded into the expected targets.'''
        UpperCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            UpperCamelCase : Union[str, Any] = json.loads(f.read() )
        UpperCamelCase : Optional[int] = {"image_id": 3_9769, "annotations": target}
        # encode them
        UpperCamelCase : Dict = DetaImageProcessor()
        UpperCamelCase : List[str] = image_processing(images=A_ , annotations=A_ , return_tensors="pt" )
        # verify pixel values
        UpperCamelCase : Optional[Any] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , A_ )
        UpperCamelCase : List[str] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1e-4 ) )
        # verify area
        UpperCamelCase : Optional[int] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) )
        # verify boxes
        UpperCamelCase : Optional[Any] = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ )
        UpperCamelCase : List[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1e-3 ) )
        # verify image_id
        UpperCamelCase : Any = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) )
        # verify is_crowd
        UpperCamelCase : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) )
        # verify class_labels
        UpperCamelCase : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) )
        # verify orig_size
        UpperCamelCase : Any = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) )
        # verify size
        UpperCamelCase : Dict = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) )
    @slow
    def __UpperCamelCase( self ):
        '''COCO panoptic annotations (with masks) are encoded correctly.'''
        UpperCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            UpperCamelCase : Optional[int] = json.loads(f.read() )
        UpperCamelCase : int = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
        UpperCamelCase : Tuple = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        UpperCamelCase : Dict = DetaImageProcessor(format="coco_panoptic" )
        UpperCamelCase : Dict = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors="pt" )
        # verify pixel values
        UpperCamelCase : List[Any] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , A_ )
        UpperCamelCase : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1e-4 ) )
        # verify area
        UpperCamelCase : List[str] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) )
        # verify boxes
        UpperCamelCase : int = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ )
        UpperCamelCase : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1e-3 ) )
        # verify image_id
        UpperCamelCase : List[Any] = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) )
        # verify is_crowd
        UpperCamelCase : int = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) )
        # verify class_labels
        UpperCamelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) )
        # verify masks
        UpperCamelCase : Optional[int] = 82_2873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A_ )
        # verify orig_size
        UpperCamelCase : Dict = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) )
        # verify size
        UpperCamelCase : str = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) )
| 704
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps each submodule to the public names it defines.
# NOTE(review): the original bound these lists to throwaway module variables,
# but the `_LazyModule(...)` call at the bottom of this file reads
# `_import_structure`, which was never defined — restored here.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The fast tokenizer needs the `tokenizers` package.
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch models are only registered when torch is installed.
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow models are only registered when TF is installed.
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below defers them until first attribute access.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): the original assigned the _LazyModule to a throwaway name,
    # so lazy loading never took effect; it must replace this module in
    # sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 38
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class A__ ( unittest.TestCase ):
    """Tests for CLIPSegProcessor: save/load round-trips, feature-extractor
    parity, tokenizer parity, visual prompts and batch decoding.

    NOTE(review): throughout this class, results are bound to a throwaway
    annotated local named `UpperCamelCase` while following lines read names
    (`self.tmpdirname`, `self.vocab_file`, `processor_slow`, `inputs`, ...)
    that are never assigned — the identifiers look machine-mangled; restore
    the intended bindings before running these tests.
    """

    def __UpperCamelCase( self ):
        '''Write a tiny BPE vocab/merges and an image-processor config to a temp dir.'''
        UpperCamelCase : Tuple = tempfile.mkdtemp()
        # fmt: off
        UpperCamelCase : List[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        UpperCamelCase : List[Any] = dict(zip(A_ , range(len(A_ ) ) ) )
        UpperCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        UpperCamelCase : Any = {"unk_token": "<unk>"}
        UpperCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(A_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(A_ ) )
        UpperCamelCase : Dict = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            "image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        UpperCamelCase : str = os.path.join(self.tmpdirname , A_ )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(A_ , A_ )
    def __UpperCamelCase( self , **A_ ):
        '''Load the slow CLIPTokenizer from the temp dir.'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **A_ )
    def __UpperCamelCase( self , **A_ ):
        '''Load the fast CLIPTokenizerFast from the temp dir.'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
    def __UpperCamelCase( self , **A_ ):
        '''Load the ViTImageProcessor from the temp dir.'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )
    def __UpperCamelCase( self ):
        '''Remove the temp dir after each test.'''
        shutil.rmtree(self.tmpdirname )
    def __UpperCamelCase( self ):
        '''Build a single random PIL image as processor input.'''
        UpperCamelCase : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        UpperCamelCase : str = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def __UpperCamelCase( self ):
        '''Processors built with slow and fast tokenizers round-trip save/load.'''
        UpperCamelCase : Dict = self.get_tokenizer()
        UpperCamelCase : Dict = self.get_rust_tokenizer()
        UpperCamelCase : List[Any] = self.get_image_processor()
        UpperCamelCase : Any = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
        processor_slow.save_pretrained(self.tmpdirname )
        UpperCamelCase : int = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
        UpperCamelCase : str = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
        processor_fast.save_pretrained(self.tmpdirname )
        UpperCamelCase : int = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , A_ )
        self.assertIsInstance(processor_fast.tokenizer , A_ )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , A_ )
        self.assertIsInstance(processor_fast.image_processor , A_ )
    def __UpperCamelCase( self ):
        '''from_pretrained applies extra tokenizer/image-processor kwargs.'''
        UpperCamelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCamelCase : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        UpperCamelCase : Tuple = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
        UpperCamelCase : List[str] = CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , A_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )
    def __UpperCamelCase( self ):
        '''Processor image output matches the bare image processor.'''
        UpperCamelCase : Any = self.get_image_processor()
        UpperCamelCase : str = self.get_tokenizer()
        UpperCamelCase : List[Any] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
        UpperCamelCase : Union[str, Any] = self.prepare_image_inputs()
        UpperCamelCase : Optional[int] = image_processor(A_ , return_tensors="np" )
        UpperCamelCase : str = processor(images=A_ , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def __UpperCamelCase( self ):
        '''Processor text output matches the bare tokenizer.'''
        UpperCamelCase : Union[str, Any] = self.get_image_processor()
        UpperCamelCase : List[str] = self.get_tokenizer()
        UpperCamelCase : Tuple = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
        UpperCamelCase : Tuple = "lower newer"
        UpperCamelCase : List[str] = processor(text=A_ )
        UpperCamelCase : Optional[Any] = tokenizer(A_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def __UpperCamelCase( self ):
        '''Text+image call returns ids, mask and pixel values; empty call raises.'''
        UpperCamelCase : str = self.get_image_processor()
        UpperCamelCase : Tuple = self.get_tokenizer()
        UpperCamelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
        UpperCamelCase : int = "lower newer"
        UpperCamelCase : Any = self.prepare_image_inputs()
        UpperCamelCase : Optional[int] = processor(text=A_ , images=A_ )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()
    def __UpperCamelCase( self ):
        '''Visual-prompt call returns pixel and conditional pixel values.'''
        UpperCamelCase : List[str] = self.get_image_processor()
        UpperCamelCase : str = self.get_tokenizer()
        UpperCamelCase : List[Any] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
        UpperCamelCase : Optional[Any] = self.prepare_image_inputs()
        UpperCamelCase : Optional[Any] = self.prepare_image_inputs()
        UpperCamelCase : Optional[int] = processor(images=A_ , visual_prompt=A_ )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()
    def __UpperCamelCase( self ):
        '''batch_decode delegates to the tokenizer.'''
        UpperCamelCase : str = self.get_image_processor()
        UpperCamelCase : List[Any] = self.get_tokenizer()
        UpperCamelCase : List[Any] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
        UpperCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        UpperCamelCase : Optional[int] = processor.batch_decode(A_ )
        UpperCamelCase : Optional[int] = tokenizer.batch_decode(A_ )
        self.assertListEqual(A_ , A_ )
| 705
|
import logging
import os
import threading
import time
# Optional dependencies: each falls back to None when unavailable, and the
# platform-selection logic at the bottom of this file tests them for truthiness.
# NOTE(review): the original bound every fallback (and `_logger`) to throwaway
# mangled names, so `if msvcrt:` and `global _logger` later raised NameError.
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    # Python 2 had no TimeoutError builtin; alias the closest equivalent.
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    """Timeout""",
    """BaseFileLock""",
    """WindowsFileLock""",
    """UnixFileLock""",
    """SoftFileLock""",
    """FileLock""",
]

__version__ = """3.0.12"""


# Cache for the module logger; populated lazily by the logger helper below.
_logger = None
def A_ ( ) -> logging.Logger:
    """Return the module-level logger, creating and caching it on first call."""
    global _logger
    # Original assigned the logger to a throwaway local, so the global cache
    # stayed None and None was returned forever; cache it properly.
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class A__ ( __snake_case ):
    """Raised when a file lock cannot be acquired within the timeout."""

    def __init__( self , lock_file ):
        """Remember the path of the lock file that could not be acquired."""
        # Original read an undefined name and stored the result in a throwaway
        # local; __str__ below reads self.lock_file, so store it there.
        self.lock_file = lock_file
        return None

    def __str__( self ):
        """Human-readable description including the lock-file path."""
        message = F"""The file lock '{self.lock_file}' could not be acquired."""
        return message
class A__ :
    """Context-manager proxy returned by ``acquire``.

    Entering yields the wrapped lock object; exiting releases it once, so
    ``with lock.acquire():`` works without re-acquiring.
    """

    def __init__( self , lock ):
        # Callers pass this by keyword (`lock=self`), so the name must stay.
        self.lock = lock
        return None

    def __enter__( self ):
        """Yield the underlying lock object."""
        return self.lock

    def __exit__( self , exc_type , exc_value , traceback ):
        """Release one nesting level of the underlying lock."""
        self.lock.release()
        return None
class A__ :
    """Base file lock with nested acquisition and timeout support.

    A counter guarded by a thread lock implements re-entrant acquire/release;
    subclasses supply the platform-specific ``_acquire`` / ``_release``.
    (Original declared duplicate parameter names — a SyntaxError — and bound
    every attribute to throwaway locals; restored to the names read below.)
    """

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        """Create a lock for *lock_file*; ``timeout < 0`` means wait forever."""
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value (set via the property so it is a float).
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file( self ):
        """Path of the file used for locking."""
        return self._lock_file

    @property
    def timeout( self ):
        """Default timeout in seconds used by :meth:`acquire`."""
        return self._timeout

    @timeout.setter
    def timeout( self , value ):
        self._timeout = float(value )
        return None

    def _acquire( self ):
        """Platform-specific acquisition; implemented by subclasses."""
        raise NotImplementedError()

    def _release( self ):
        """Platform-specific release; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def is_locked( self ):
        """True while this object holds the OS-level lock."""
        return self._lock_file_fd is not None

    def acquire( self , timeout=None , poll_intervall=0.05 ):
        """Acquire the lock, polling every *poll_intervall* seconds.

        Raises ``Timeout`` when a non-negative *timeout* elapses first.
        Returns a proxy usable as a context manager that releases on exit.
        """
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(poll_intervall )
        except: # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )

    def release( self , force=False ):
        """Decrement the nesting counter; release the OS lock at zero or if *force*."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
        return None

    def __enter__( self ):
        """Acquire with the default timeout on ``with`` entry."""
        self.acquire()
        return self

    def __exit__( self , exc_type , exc_value , traceback ):
        """Release one nesting level on ``with`` exit."""
        self.release()
        return None

    def __del__( self ):
        """Best-effort cleanup: force-release on garbage collection."""
        self.release(force=True )
        return None

    def hash_filename_if_too_long( self , path , max_length ):
        """Shorten an over-long lock filename by splicing in a hash.

        Keeps the directory; the resulting basename is at most *max_length*.
        """
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname , new_filename )
        else:
            return path
class A__ ( __snake_case ):
    """File lock based on ``msvcrt.locking``; Windows only.

    (Original declared duplicate ``__init__`` parameters — a SyntaxError — and
    bound results to throwaway locals; restored to the attribute/method names
    the base class reads: ``_lock_file``, ``_lock_file_fd``, ``_acquire``,
    ``_release``.)
    """

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        # Extended-length path prefix so very long paths work on Windows.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )

    def _acquire( self ):
        """Try a non-blocking byte lock; leave ``_lock_file_fd`` None on failure."""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None

    def _release( self ):
        """Unlock, close the descriptor, and best-effort remove the lock file."""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class A__ ( __snake_case ):
    """File lock based on ``fcntl.flock``; Unix only.

    (Original declared duplicate ``__init__`` parameters — a SyntaxError — and
    bound results to throwaway locals; restored to the names the base class
    reads.)
    """

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        # The filesystem itself bounds how long a lock-file name may be.
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )

    def _acquire( self ):
        """Try a non-blocking exclusive flock; leave ``_lock_file_fd`` None on failure."""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        """Unlock and close the descriptor (the lock file itself is kept)."""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class A__ ( __snake_case ):
    """Soft lock: the mere existence of the lock file is the lock (O_EXCL).

    Portable fallback used when neither ``msvcrt`` nor ``fcntl`` is available.
    (Original defined both methods under one mangled name, so the second
    shadowed the first; restored to ``_acquire`` / ``_release`` as read by the
    base class.)
    """

    def _acquire( self ):
        """Atomically create the lock file; failure means someone else holds it."""
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        """Close the descriptor and best-effort remove the lock file."""
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Select the platform-appropriate FileLock implementation.
# NOTE(review): the original bound each choice to a throwaway mangled name,
# so the public `FileLock` advertised in `__all__` was never defined.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("""only soft file lock is available""")
| 38
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class A__ :
    """Immutable flax DDPM scheduler state; updated via ``.replace(...)``.

    (Original declared all four fields under one name and the classmethod with
    three identical parameters — a SyntaxError; restored to the names read by
    the scheduler below: ``state.common``, ``.replace(num_inference_steps=…)``
    and the ``.create(common=…, init_noise_sigma=…, timesteps=…)`` call.)
    """

    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create( cls , common , init_noise_sigma , timesteps ):
        """Build an initial state; ``num_inference_steps`` stays None until set."""
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class A__(__snake_case):
    """Scheduler step output that also carries the updated scheduler state
    (on top of the ``prev_sample`` field from the base output class)."""

    state: DDPMSchedulerState
class A__(__snake_case, __snake_case):
    """
    Flax implementation of the DDPM scheduler (Ho et al.,
    https://arxiv.org/abs/2006.11239). All mutable quantities live in a
    separate state object; methods return updated states instead of mutating.

    NOTE(review): names below were reconstructed from the diffusers
    FlaxDDPMScheduler this file mirrors; the mangled original had duplicate
    parameter names and undefined locals throughout.
    """

    # Scheduler names this implementation is interchangeable with.
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        # Tells pipelines that create_state() must be called before use.
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.floataa,
    ):
        # @register_to_config stores every argument on self.config; only the
        # computation dtype is needed as a plain attribute.
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        """Build the initial scheduler state (shared beta/alpha tables plus the
        full, reversed training timestep grid)."""
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        """DDPM does not rescale model inputs; ``sample`` is returned as-is."""
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        """Return a new state holding the ``num_inference_steps`` timesteps to
        iterate over at inference time."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        """Posterior variance at timestep ``t`` (formulas (6)/(7) of the DDPM
        paper), shaped by the configured ``variance_type``."""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get
        # previous sample: x_{t-1} ~ N(pred_prev_sample, variance)
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate in log space between the small and large bounds
            # using the network-predicted fraction.
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """Run one reverse-diffusion step: predict x_{t-1} from the model
        output at timestep ``t``.

        Returns a ``FlaxDDPMSchedulerOutput`` (or a ``(prev_sample, state)``
        tuple when ``return_dict`` is False).
        """
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        # Split off the predicted variance channels when the model learns them.
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        # No noise is added at the final step (t == 0).
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Forward-diffuse ``original_samples`` to the given ``timesteps``."""
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Compute the v-prediction training target for the given timesteps."""
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
| 706
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A_(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
) -> None:
    """Assemble a RAG checkpoint from a generator and a question encoder.

    Loads both sub-model configs into a RagConfig, builds the combined model,
    saves it (plus both tokenizers) under ``dest_dir`` and reloads it once as
    a sanity check. ``dest_dir`` is a ``pathlib.Path``.
    """
    # Default to the matching facebook base config when none is given.
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    # Tokenizer identifiers fall back to the model identifiers.
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


# Name used by the CLI entry point at the bottom of this file.
consolidate = A_
if __name__ == "__main__":
    # CLI wrapper: parse model/tokenizer identifiers and consolidate them into
    # a single RAG checkpoint directory.
    __lowerCamelCase : Tuple = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""",
        choices=["""rag_sequence""", """rag_token"""],
        required=True,
        type=str,
        help="""RAG model type: rag_sequence, rag_token""",
    )
    parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
    parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
    parser.add_argument(
        """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
    )
    parser.add_argument(
        """--generator_tokenizer_name_or_path""",
        type=str,
        help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
    )
    parser.add_argument(
        """--question_encoder_tokenizer_name_or_path""",
        type=str,
        help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
    )
    parser.add_argument(
        """--config_name_or_path""",
        type=str,
        help=(
            """Identifier of the model config to use, if not provided, resolves to a base config for a given"""
            """ ``model_type``"""
        ),
    )
    # NOTE(review): the three assignments below bind the mangled name
    # ``__lowerCamelCase`` while later lines read ``parser``/``args``/
    # ``dest_dir`` — these look like renamed ``parser``, ``args`` and
    # ``dest_dir``; confirm against the upstream script.
    __lowerCamelCase : Dict = parser.parse_args()
    __lowerCamelCase : Dict = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 38
| 0
|
import sys
import turtle
def get_mid(pa, pb):
    """Return the midpoint of two 2-D points given as (x, y) tuples."""
    # The mangled original averaged `pa` with itself because both parameters
    # had been collapsed to one name; the midpoint uses both endpoints.
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


# Backward-compatible alias for the mangled original name.
A_ = get_mid
def triangle(vertexa, vertexb, vertexc, depth) -> None:
    """Draw a Sierpinski triangle of the given recursion ``depth``.

    Draws the outline of the triangle (vertexa -> vertexb -> vertexc ->
    vertexa) with the module-level ``my_pen`` turtle, then recurses into the
    three corner sub-triangles until depth reaches 0.
    """
    # Outline the current triangle without drawing the travel move.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    # Recurse into the three corner triangles formed by the edge midpoints.
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)


# Backward-compatible alias for the mangled original name.
A_ = triangle
if __name__ == "__main__":
    # Expect exactly one CLI argument: the recursion depth of the fractal.
    if len(sys.argv) != 2:
        raise ValueError(
            """Correct format for using this script: """
            """python fractals.py <int:depth_for_fractal>"""
        )
    # Configure the drawing pen: hidden cursor, medium speed, red strokes.
    # NOTE(review): the two ``__lowerCamelCase`` assignments below are read
    # later as ``my_pen`` and ``vertices`` — the names look mechanically
    # mangled; confirm against the upstream script.
    __lowerCamelCase : int = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("""red""")

    __lowerCamelCase : Tuple = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 707
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
    """
    Test helper that builds a small ConvBERT config plus random input tensors
    for the TF model tests below.

    NOTE(review): the constructor signature and every assignment use mangled
    placeholder names (duplicate ``A_`` parameters, a single re-used
    ``UpperCamelCase`` local, RHS names like ``parent`` that are never bound)
    — this block is preserved byte-for-byte; confirm against the upstream
    TFConvBertModelTester before relying on it.
    """
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
        '''Store the hard-coded hyper-parameters used to build configs/inputs.'''
        UpperCamelCase : Dict = parent
        UpperCamelCase : str = 13
        UpperCamelCase : int = 7
        UpperCamelCase : str = True
        UpperCamelCase : Dict = True
        UpperCamelCase : str = True
        UpperCamelCase : Tuple = True
        UpperCamelCase : List[str] = 99
        UpperCamelCase : Optional[Any] = 384
        UpperCamelCase : Tuple = 2
        UpperCamelCase : Union[str, Any] = 4
        UpperCamelCase : Dict = 37
        UpperCamelCase : Any = "gelu"
        UpperCamelCase : List[Any] = 0.1
        UpperCamelCase : int = 0.1
        UpperCamelCase : Tuple = 512
        UpperCamelCase : List[Any] = 16
        UpperCamelCase : int = 2
        UpperCamelCase : Dict = 0.02
        UpperCamelCase : Optional[Any] = 3
        UpperCamelCase : List[Any] = 4
        UpperCamelCase : Dict = 128
        UpperCamelCase : Optional[Any] = 2
        UpperCamelCase : Optional[int] = 9
        UpperCamelCase : Optional[int] = 1
        UpperCamelCase : Union[str, Any] = None
    def __UpperCamelCase( self ):
        '''Build random ids/masks/labels plus a ConvBertConfig for the tests.'''
        UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : str = None
        if self.use_input_mask:
            UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : Tuple = None
        if self.use_token_type_ids:
            UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : List[Any] = None
        if self.use_labels:
            UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase : Any = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Run the base TFConvBertModel and check the hidden-state shape.'''
        UpperCamelCase : str = TFConvBertModel(config=A_ )
        UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        UpperCamelCase : Optional[int] = [input_ids, input_mask]
        UpperCamelCase : Any = model(A_ )
        UpperCamelCase : int = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Run the masked-LM head and check the logits shape.'''
        UpperCamelCase : Tuple = TFConvBertForMaskedLM(config=A_ )
        UpperCamelCase : int = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Dict = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Run the sequence-classification head and check the logits shape.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : int = TFConvBertForSequenceClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Run the multiple-choice head on tiled inputs and check the logits shape.'''
        UpperCamelCase : List[str] = self.num_choices
        UpperCamelCase : str = TFConvBertForMultipleChoice(config=A_ )
        UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Any = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : List[str] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Run the token-classification head and check the logits shape.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : str = TFConvBertForTokenClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : str = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Run the question-answering head and check start/end logits shapes.'''
        UpperCamelCase : List[str] = TFConvBertForQuestionAnswering(config=A_ )
        UpperCamelCase : Union[str, Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Union[str, Any] = model(A_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __UpperCamelCase( self ):
        '''Repackage prepare_config_and_inputs() into the common (config, inputs_dict) form.'''
        UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) : Optional[Any] = config_and_inputs
        UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
    """
    ConvBERT TF model test suite (common modeling + pipeline mixins).

    NOTE(review): all test methods share the mangled name ``__UpperCamelCase``
    (later defs shadow earlier ones) and locals are uniformly renamed to
    ``UpperCamelCase`` — preserved byte-for-byte; confirm against the upstream
    TFConvBertModelTest before relying on it.
    """
    # Model classes / pipeline mapping exercised by the mixin tests.
    _UpperCAmelCase :Dict = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    _UpperCAmelCase :Optional[Any] = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Feature flags consumed by the common test mixin.
    _UpperCAmelCase :Any = False
    _UpperCAmelCase :int = False
    _UpperCAmelCase :str = False
    def __UpperCamelCase( self ):
        '''Create the shared model tester and config tester.'''
        UpperCamelCase : Dict = TFConvBertModelTester(self )
        UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , hidden_size=37 )
    def __UpperCamelCase( self ):
        '''Run the common config sanity checks.'''
        self.config_tester.run_common_tests()
    def __UpperCamelCase( self ):
        '''Check the base model output shapes.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )
    def __UpperCamelCase( self ):
        '''Check the masked-LM head.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A_ )
    def __UpperCamelCase( self ):
        '''Check the multiple-choice head.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A_ )
    def __UpperCamelCase( self ):
        '''Check the question-answering head.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A_ )
    def __UpperCamelCase( self ):
        '''Check the sequence-classification head.'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A_ )
    def __UpperCamelCase( self ):
        '''Check the token-classification head.'''
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A_ )
    @slow
    def __UpperCamelCase( self ):
        '''Round-trip every model through SavedModel and compare hidden states/attentions.'''
        UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Optional[Any] = True
        UpperCamelCase : Any = True
        if hasattr(A_ , "use_cache" ):
            UpperCamelCase : List[str] = True
        UpperCamelCase : List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Any = getattr(self.model_tester , "key_length" , A_ )
        for model_class in self.all_model_classes:
            UpperCamelCase : List[Any] = self._prepare_for_class(A_ , A_ )
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Optional[int] = len(model(A_ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(A_ , saved_model=A_ )
                # SavedModel export writes the graph under <dir>/saved_model/1.
                UpperCamelCase : Union[str, Any] = os.path.join(A_ , "saved_model" , "1" )
                UpperCamelCase : Dict = tf.keras.models.load_model(A_ )
                UpperCamelCase : str = model(A_ )
                if self.is_encoder_decoder:
                    UpperCamelCase : Union[str, Any] = outputs["encoder_hidden_states"]
                    UpperCamelCase : Any = outputs["encoder_attentions"]
                else:
                    UpperCamelCase : Any = outputs["hidden_states"]
                    UpperCamelCase : List[str] = outputs["attentions"]
                self.assertEqual(len(A_ ) , A_ )
                UpperCamelCase : int = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(A_ ) , A_ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def __UpperCamelCase( self ):
        '''Smoke-test loading the public YituTech/conv-bert-base checkpoint.'''
        UpperCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(A_ )
    def __UpperCamelCase( self ):
        '''Check attention outputs (shapes/counts) with and without output_attentions.'''
        UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Dict = True
        UpperCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "key_length" , A_ )
        UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , A_ )
        def check_decoder_attentions_output(A_ ):
            # Decoder attentions: one tuple entry per layer, heads halved by
            # ConvBERT's head-ratio.
            UpperCamelCase : Optional[Any] = len(A_ )
            self.assertEqual(out_len % 2 , 0 )
            UpperCamelCase : Any = outputs.decoder_attentions
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(A_ ):
            # Encoder (or plain) attentions: same per-layer shape check.
            UpperCamelCase : Dict = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            UpperCamelCase : Union[str, Any] = True
            UpperCamelCase : List[Any] = False
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            UpperCamelCase : List[str] = len(A_ )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            if self.is_encoder_decoder:
                UpperCamelCase : int = model_class(A_ )
                UpperCamelCase : Tuple = model(self._prepare_for_class(A_ , A_ ) )
                self.assertEqual(config.output_hidden_states , A_ )
                check_decoder_attentions_output(A_ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            UpperCamelCase : Tuple = True
            UpperCamelCase : int = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            # Check attention is always last and order is fine
            UpperCamelCase : Optional[int] = True
            UpperCamelCase : List[str] = True
            UpperCamelCase : Optional[int] = model_class(A_ )
            UpperCamelCase : Optional[Any] = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) )
            self.assertEqual(model.config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
@require_tf
class A__(unittest.TestCase):
    """Slow integration test: run the public ConvBERT checkpoint on a fixed
    input and compare against recorded reference activations."""

    @slow
    def test_inference_masked_lm(self):
        """Check output shape and a 3x3 slice of the last hidden state."""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Reference values recorded from the released checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 38
| 0
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger and map of canonical pretrained DETR checkpoints to
# their hosted config files.
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : int = {
    """facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class A__(__snake_case):
    """
    Configuration for a DETR (DEtection TRansformer) model: backbone choice,
    encoder/decoder transformer sizes, and Hungarian-matcher / loss weights.

    NOTE(review): parameter and attribute names were reconstructed from the
    standard Hugging Face DetrConfig this block mirrors; the mangled original
    had duplicate parameter names.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic-name aliases resolved through the properties below.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Alias declared in attribute_map.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias declared in attribute_map.
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Alternate constructor from a pre-built backbone config."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class A__(__snake_case):
    """ONNX export configuration for DETR.

    NOTE(review): property names restored to the OnnxConfig hooks (`inputs`,
    `atol_for_validation`, `default_onnx_opset`) — the mangled original gave
    all three the same name, so only the last survived.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        # Dynamic axes for the two DETR inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when comparing ONNX vs framework outputs.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 708
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger and map of canonical pretrained CamemBERT checkpoints
# to their hosted config files.
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : str = {
    """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
    """umberto-commoncrawl-cased-v1""": (
        """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
    ),
    """umberto-wikipedia-uncased-v1""": (
        """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
    ),
}
class A__(__snake_case):
    """
    Configuration for a CamemBERT model (RoBERTa architecture with French
    pretraining defaults, e.g. pad_token_id=1).

    NOTE(review): parameter/attribute names reconstructed from the standard
    Hugging Face CamembertConfig this block mirrors; the mangled original had
    duplicate parameter names.
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__(__snake_case):
    """ONNX export configuration for CamemBERT."""

    @property
    def inputs(self):
        # Multiple-choice adds a `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 38
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
    # NOTE(review): this block looks like a mechanically renamed Kandinsky 2.2
    # inpaint pipeline test.  Every method/property below is named
    # __UpperCamelCase, so each later definition silently replaces the earlier
    # ones on the class, and the bodies read attributes (self.time_input_dim,
    # self.dummy_unet, ...) that are never bound under those names.  Results
    # assigned to the throwaway local `UpperCamelCase` are likewise lost before
    # the names the following lines read (unet, scheduler, movq, image, mask,
    # generator, ...) are ever defined.  Confirm against the original test
    # before relying on anything here.
    _UpperCAmelCase :List[Any] = KandinskyVaaInpaintPipeline
    _UpperCAmelCase :List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    _UpperCAmelCase :List[Any] = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    _UpperCAmelCase :List[Any] = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    _UpperCAmelCase :Optional[int] = False
    @property
    def __UpperCamelCase( self ):
        """Hidden size used by the dummy text embedder (32)."""
        return 32
    @property
    def __UpperCamelCase( self ):
        """Time-embedding input width for the dummy UNet (32)."""
        return 32
    @property
    def __UpperCamelCase( self ):
        """Alias of the time input dim.

        NOTE(review): reads self.time_input_dim, which is never defined on this
        class under that name.
        """
        return self.time_input_dim
    @property
    def __UpperCamelCase( self ):
        """Four times the time input dim (same missing-attribute caveat as above)."""
        return self.time_input_dim * 4
    @property
    def __UpperCamelCase( self ):
        """Dummy sequence length (100)."""
        return 100
    @property
    def __UpperCamelCase( self ):
        """Build a tiny UNet2DConditionModel for testing.

        NOTE(review): the config dict is bound to a throwaway local, then the
        constructor is called with the undefined name `A_` and the undefined
        `model` is returned — this cannot run as written.
        """
        torch.manual_seed(0 )
        UpperCamelCase : List[Any] = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        UpperCamelCase : Optional[int] = UNetaDConditionModel(**A_ )
        return model
    @property
    def __UpperCamelCase( self ):
        """Keyword arguments for a small VQModel used as the movq."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def __UpperCamelCase( self ):
        """Instantiate the dummy VQModel.

        NOTE(review): the result is discarded into a local and the undefined
        name `model` is returned; `self.dummy_movq_kwargs` is also never bound.
        """
        torch.manual_seed(0 )
        UpperCamelCase : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
        return model
    def __UpperCamelCase( self ):
        """Assemble unet/scheduler/movq components for the pipeline.

        NOTE(review): unet, movq, scheduler and components are all undefined —
        the assignments above them go to the throwaway local.
        """
        UpperCamelCase : List[Any] = self.dummy_unet
        UpperCamelCase : Optional[Any] = self.dummy_movq
        UpperCamelCase : Optional[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
        UpperCamelCase : Dict = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def __UpperCamelCase( self , A_ , A_=0 ):
        # NOTE(review): duplicate parameter name `A_` — this is a SyntaxError in
        # Python; the original presumably took (device, seed=0).
        '''simple docstring'''
        UpperCamelCase : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
        UpperCamelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            A_ )
        # create init_image
        UpperCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
        UpperCamelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase : str = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) )
        # create mask
        UpperCamelCase : Dict = np.ones((64, 64) , dtype=np.floataa )
        UpperCamelCase : Union[str, Any] = 0
        if str(A_ ).startswith("mps" ):
            UpperCamelCase : Tuple = torch.manual_seed(A_ )
        else:
            UpperCamelCase : Optional[int] = torch.Generator(device=A_ ).manual_seed(A_ )
        UpperCamelCase : Optional[int] = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def __UpperCamelCase( self ):
        """Smoke-test the inpaint pipeline on CPU against a fixed slice.

        NOTE(review): `pipe`, `image`, `image_slice`, `expected_slice` etc. are
        undefined for the same local-discard reason as above.
        """
        UpperCamelCase : int = "cpu"
        UpperCamelCase : Dict = self.get_dummy_components()
        UpperCamelCase : str = self.pipeline_class(**A_ )
        UpperCamelCase : Any = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : int = pipe(**self.get_dummy_inputs(A_ ) )
        UpperCamelCase : List[Any] = output.images
        UpperCamelCase : List[str] = pipe(
            **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
        UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        UpperCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
        print(F"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase : List[str] = np.array(
            [0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def __UpperCamelCase( self ):
        """Batched single-image inference should match unbatched within 3e-3."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    def __UpperCamelCase( self ):
        """Free GPU memory between tests.

        NOTE(review): named __UpperCamelCase rather than tearDown, so unittest
        never calls it — and the next method replaces it on the class anyway.
        """
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCamelCase( self ):
        """End-to-end Kandinsky 2.2 prior + inpaint run against a reference image.

        NOTE(review): every result below is bound to the throwaway local
        `UpperCamelCase`, and the later calls pass the undefined name `A_`;
        this test cannot run as written.
        """
        UpperCamelCase : Optional[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
        UpperCamelCase : Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        UpperCamelCase : Tuple = np.ones((768, 768) , dtype=np.floataa )
        UpperCamelCase : str = 0
        UpperCamelCase : Tuple = "a hat"
        UpperCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(A_ )
        UpperCamelCase : Optional[Any] = KandinskyVaaInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.floataa )
        UpperCamelCase : int = pipeline.to(A_ )
        pipeline.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
        UpperCamelCase : List[Any] = pipe_prior(
            A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        UpperCamelCase : Union[str, Any] = pipeline(
            image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
        UpperCamelCase : Union[str, Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(A_ , A_ )
| 709
|
def A_ ( input_1: int , input_2: int ) -> int:
    """NOR gate: return 1 only when both inputs are 0, else 0.

    Fix: the original declared both parameters as `_lowerCAmelCase`, which is a
    duplicate-argument SyntaxError in Python.
    """
    return int(input_1 == input_2 == 0 )
def A_ ( ) -> None:
    """Print the truth table of a NOR gate.

    NOTE(review): calls `nor_gate`, which is not defined in this file — the
    gate function above is also named `A_` and is shadowed by this definition.
    """
    print("Truth Table of NOR Gate:" )
    print("| Input 1 | Input 2 | Output |" )
    print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
    print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
    print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
    print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is undefined — the printer above is named A_.
    main()
| 38
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class A__ :
    """Feature type for fixed-language translation dicts ({lang: text}).

    Fixes: the obfuscated original collapsed all four fields into one name
    (`_UpperCAmelCase`) while the methods read `self.languages`, and passed the
    undefined name `__snake_case` to field(init=..., repr=...).
    """

    # Languages this feature accepts; methods below sort over it.
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='Translation' , init=False , repr=False )

    def __call__( self ):
        """Return the pyarrow storage type: a struct with one string field per language."""
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def __UpperCamelCase( self ):
        """Flatten into one string Value per language (sorted by language code)."""
        from .features import Value

        return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class A__ :
    """Feature type for variable-language translation dicts.

    Fixes: restores field names the methods read (`self.languages`,
    `self.num_languages`), the locals the encode step reads
    (`translation_dict`, `translation_tuples`, ...), and replaces the undefined
    `__snake_case` in field(init=..., repr=...).

    NOTE(review): the three plain methods all share the name __UpperCamelCase,
    so only the last one survives on the class — presumably they were
    __post_init__, encode_example and flatten; confirm against the original.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='TranslationVariableLanguages' , init=False , repr=False )

    def __UpperCamelCase( self ):
        """Normalise: dedupe/sort the language list and cache its length."""
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None

    def __call__( self ):
        """Return the pyarrow storage type: parallel lists of languages and texts."""
        return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )

    def __UpperCamelCase( self , translation_dict ):
        """Encode one example, splitting multi-translation entries per language."""
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                F"""Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).""" )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}

    def __UpperCamelCase( self ):
        """Flatten into parallel Sequence(Value("string")) columns."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string" ) ),
            "translation": Sequence(Value("string" ) ),
        }
| 710
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __snake_case ):
    """Blip-2 style processor wrapping a BlipImageProcessor and an AutoTokenizer.

    Fixes: the obfuscated original declared every parameter as `A_` (duplicate
    arguments — a SyntaxError) while the bodies read the real names
    (`images`, `text`, `text_encoding`, ...), and discarded attribute
    assignments into throwaway locals.

    NOTE(review): the two decode helpers and the property below all share the
    name __UpperCamelCase, so only the last definition survives on the class.
    """

    _UpperCAmelCase :Optional[int] = ['image_processor', 'tokenizer']
    _UpperCAmelCase :Tuple = 'BlipImageProcessor'
    _UpperCAmelCase :Optional[int] = 'AutoTokenizer'

    def __init__( self , image_processor , tokenizer ):
        """Store the two sub-processors; token_type_ids are never returned."""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs ):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns the tokenizer output when only text is given, otherwise the
        image-processor output with the text encoding merged in.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor

    def __UpperCamelCase( self , *args , **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def __UpperCamelCase( self , *args , **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def __UpperCamelCase( self ):
        """Union of tokenizer and image-processor input names, de-duplicated in order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 38
| 0
|
'''simple docstring'''
def A_ ( neighbours: list[int] , colored_vertices: list[int] , color: int ) -> bool:
    """Return True if no already-colored neighbour uses `color`.

    `neighbours` is one adjacency-matrix row (1 marks an edge).  Fix: the
    original declared all three parameters as `_lowerCAmelCase` (a duplicate-
    argument SyntaxError) while the body read the real names.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def A_ ( graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ) -> bool:
    """Backtracking step of graph coloring: try to color vertices from `index` on.

    Mutates `colored_vertices` in place; returns True on success.  Fixes: the
    original declared four identical parameter names (SyntaxError) and called
    the undefined helpers `valid_coloring`/`util_color` — both are inlined as
    local functions so the block is self-contained.
    """

    def _valid(neighbours: list[int] , color: int ) -> bool:
        # A color is valid if no adjacent, already-colored vertex uses it.
        return not any(
            neighbour == 1 and colored_vertices[i] == color
            for i, neighbour in enumerate(neighbours ) )

    def _color_from(idx: int ) -> bool:
        # Base Case
        if idx == len(graph ):
            return True
        # Recursive Step
        for color in range(max_colors ):
            if _valid(graph[idx] , color ):
                colored_vertices[idx] = color          # color current vertex
                if _color_from(idx + 1 ):
                    return True
                colored_vertices[idx] = -1             # backtrack

        return False

    return _color_from(index )
def A_ ( graph: list[list[int]] , max_colors: int ) -> list[int]:
    """Color `graph` (adjacency matrix) with at most `max_colors` colors.

    Returns one valid color per vertex, or [] when no coloring exists.  Fixes:
    the original declared duplicate parameter names (SyntaxError) and called
    the undefined `util_color`; the backtracking search is inlined here so the
    function stands on its own.
    """
    colored_vertices = [-1] * len(graph )

    def _valid(neighbours: list[int] , color: int ) -> bool:
        # No adjacent, already-colored vertex may reuse the color.
        return not any(
            neighbour == 1 and colored_vertices[i] == color
            for i, neighbour in enumerate(neighbours ) )

    def _color_from(idx: int ) -> bool:
        if idx == len(graph ):
            return True
        for color in range(max_colors ):
            if _valid(graph[idx] , color ):
                colored_vertices[idx] = color
                if _color_from(idx + 1 ):
                    return True
                colored_vertices[idx] = -1
        return False

    if _color_from(0 ):
        return colored_vertices
    return []
| 711
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( __snake_case ):
    # NOTE(review): this looks like a mechanically renamed TVLT-style audio
    # feature extractor.  The __init__ signature declares every parameter as
    # `A_` — duplicate arguments are a SyntaxError — and every attribute
    # assignment is discarded into the throwaway local `UpperCamelCase`, yet
    # __call__ reads self.sampling_rate, self.patch_size, self.n_fft, etc.
    # Confirm against the original before relying on this block.
    _UpperCAmelCase :Tuple = ['audio_values', 'audio_mask']
    def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ):
        # NOTE(review): mutable default [16, 16] would be shared across
        # instances if this ever ran; the presumed parameters are
        # (spectrogram_length, num_channels, patch_size, feature_size,
        # sampling_rate, hop_length_to_sampling_rate, n_fft, padding_value).
        '''simple docstring'''
        super().__init__(
            feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , )
        UpperCamelCase : Optional[int] = spectrogram_length
        UpperCamelCase : Dict = num_channels
        UpperCamelCase : Optional[Any] = patch_size
        UpperCamelCase : str = feature_size // self.patch_size[1]
        UpperCamelCase : List[str] = n_fft
        UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate
        UpperCamelCase : Optional[int] = sampling_rate
        UpperCamelCase : int = padding_value
        UpperCamelCase : str = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T
    def __UpperCamelCase( self , A_ ):
        """Compute a clipped, dB-scaled log-mel spectrogram of one waveform.

        NOTE(review): intermediate results are bound to throwaway locals while
        the following lines read `log_spec` — broken as written.
        """
        UpperCamelCase : Union[str, Any] = spectrogram(
            A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
        UpperCamelCase : List[Any] = log_spec[:, :-1]
        UpperCamelCase : Optional[int] = log_spec - 20.0
        UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ):
        # NOTE(review): duplicate `A_` parameters — SyntaxError; presumably
        # (raw_speech, sampling_rate, return_tensors, return_attention_mask,
        # do_normalize, ...).  Locals such as raw_speech / audio_features /
        # max_patch_len referenced below are likewise never bound.
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    F""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        UpperCamelCase : Union[str, Any] = is_batched_numpy or (
            isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(A_ , np.ndarray ):
            UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa )
        elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            UpperCamelCase : List[Any] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        UpperCamelCase : str = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , A_ ):
            UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        UpperCamelCase : List[str] = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            UpperCamelCase : str = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa )
        # convert into correct format for padding
        UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        UpperCamelCase : List[str] = padded_audio_features * self.padding_value
        for i in range(len(A_ ) ):
            UpperCamelCase : Union[str, Any] = audio_features[i]
            UpperCamelCase : Optional[int] = feature
        # return as BatchFeature
        if return_attention_mask:
            UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            UpperCamelCase : int = {"audio_values": padded_audio_features}
        UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ )
        return encoded_inputs
| 38
| 0
|
from __future__ import annotations
import math
def A_ ( u: float , p: int ) -> float:
    """Return u * (u-1) * (u-2) * ... * (u-p+1) for Newton forward interpolation.

    Fix: the original declared both parameters as `_lowerCAmelCase`, a
    duplicate-argument SyntaxError, while the body needed both values.
    """
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def A_ ( ) -> None:
    """Interactive Newton forward-difference interpolation driver.

    NOTE(review): heavily mangled — assignments go to the throwaway local
    `UpperCamelCase`, yet later lines read `y`, `x`, `n`, `value`, `summ` and
    pass the undefined `_lowerCAmelCase` to range()/float()/ucal(); `ucal`
    itself is not defined under that name (the helper above is `A_`).  The
    function cannot run as written.
    """
    UpperCamelCase : Optional[Any] = int(input("enter the numbers of values: " ) )
    UpperCamelCase : list[list[float]] = []
    for _ in range(_lowerCAmelCase ):
        y.append([] )
    for i in range(_lowerCAmelCase ):
        for j in range(_lowerCAmelCase ):
            y[i].append(_lowerCAmelCase )
    UpperCamelCase : str = 0
    print("enter the values of parameters in a list: " )
    UpperCamelCase : Tuple = list(map(_lowerCAmelCase , input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(_lowerCAmelCase ):
        UpperCamelCase : Union[str, Any] = float(input() )
    UpperCamelCase : Optional[int] = int(input("enter the value to interpolate: " ) )
    UpperCamelCase : int = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , _lowerCAmelCase ):
        for j in range(n - i ):
            UpperCamelCase : Optional[Any] = y[j + 1][i - 1] - y[j][i - 1]
    UpperCamelCase : int = y[0][0]
    for i in range(1 , _lowerCAmelCase ):
        summ += (ucal(_lowerCAmelCase , _lowerCAmelCase ) * y[0][i]) / math.factorial(_lowerCAmelCase )
    print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
    main()
| 712
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar("""KT""")
__lowerCamelCase : Dict = TypeVar("""VT""")
class A__ ( Generic[KT, VT] ):
    """A skip-list node: one key/value pair plus per-level forward links.

    Fixes: the original __init__ declared both parameters as `A_` (duplicate
    arguments — a SyntaxError) and discarded all three attribute assignments
    into a throwaway local, while __repr__ and `level` read self.key,
    self.value and self.forward.
    """

    def __init__( self , key = "root" , value = None ):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i (list[Node[KT, VT]]).
        self.forward = []

    def __repr__( self ):
        """Debug representation, e.g. Node(Key1: 3)."""
        return F"""Node({self.key}: {self.value})"""

    @property
    def __UpperCamelCase( self ):
        """Number of levels this node participates in."""
        return len(self.forward )
class A__ ( Generic[KT, VT] ):
    # NOTE(review): this is a mechanically renamed skip list.  __init__ and the
    # two-argument method below declare duplicate `A_` parameters (SyntaxError),
    # assignments are discarded into the throwaway local `UpperCamelCase`
    # (including the `node, update_vector = ...` unpacks, written as two
    # assignments to the same name), and the four plain methods all share the
    # name __UpperCamelCase so only the last survives.  Treat every behavioural
    # comment below as the *intended* behaviour, not the actual one.
    def __init__( self , A_ = 0.5 , A_ = 16 ):
        # Presumed parameters: p (level-promotion probability) and max_level.
        '''simple docstring'''
        UpperCamelCase : Node[KT, VT] = Node[KT, VT]()
        UpperCamelCase : List[Any] = 0
        UpperCamelCase : Union[str, Any] = p
        UpperCamelCase : List[str] = max_level
    def __str__( self ):
        """ASCII diagram of the list: one labelled row per node plus link columns."""
        UpperCamelCase : int = list(self )
        if len(A_ ) == 0:
            return F"""SkipList(level={self.level})"""
        UpperCamelCase : str = max((len(str(A_ ) ) for item in items) , default=4 )
        UpperCamelCase : Dict = max(A_ , 4 ) + 4
        UpperCamelCase : str = self.head
        UpperCamelCase : List[Any] = []
        UpperCamelCase : int = node.forward.copy()
        lines.append(F"""[{node.key}]""".ljust(A_ , "-" ) + "* " * len(A_ ) )
        lines.append(" " * label_size + "| " * len(A_ ) )
        while len(node.forward ) != 0:
            UpperCamelCase : Union[str, Any] = node.forward[0]
            lines.append(
                F"""[{node.key}]""".ljust(A_ , "-" )
                + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
            lines.append(" " * label_size + "| " * len(A_ ) )
            UpperCamelCase : Tuple = node.forward
        lines.append("None".ljust(A_ ) + "* " * len(A_ ) )
        return F"""SkipList(level={self.level})\n""" + "\n".join(A_ )
    def __iter__( self ):
        """Yield keys in ascending order by walking the level-0 links."""
        UpperCamelCase : Union[str, Any] = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            UpperCamelCase : Union[str, Any] = node.forward[0]
    def __UpperCamelCase( self ):
        """Draw a random level: promote with probability p up to max_level."""
        UpperCamelCase : Tuple = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def __UpperCamelCase( self , A_ ):
        """Locate a key; intended to return (node-or-None, update_vector)."""
        UpperCamelCase : List[str] = []
        UpperCamelCase : List[Any] = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                UpperCamelCase : str = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(A_ )
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def __UpperCamelCase( self , A_ ):
        """Delete a key, re-wiring every level that referenced the removed node."""
        UpperCamelCase , UpperCamelCase : str = self._locate_node(A_ )
        if node is not None:
            for i, update_node in enumerate(A_ ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        UpperCamelCase : Tuple = node.forward[i]
                    else:
                        UpperCamelCase : List[Any] = update_node.forward[:i]
    def __UpperCamelCase( self , A_ , A_ ):
        # NOTE(review): duplicate `A_` parameters — SyntaxError; presumably
        # insert(key, value): overwrite an existing key or splice a new node
        # in at a randomly drawn level.
        '''simple docstring'''
        UpperCamelCase , UpperCamelCase : Optional[int] = self._locate_node(A_ )
        if node is not None:
            UpperCamelCase : Union[str, Any] = value
        else:
            UpperCamelCase : Dict = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , A_ ):
                    update_vector.append(self.head )
                UpperCamelCase : Optional[int] = level
            UpperCamelCase : Dict = Node(A_ , A_ )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(A_ )
                else:
                    UpperCamelCase : List[Any] = new_node
    def __UpperCamelCase( self , A_ ):
        """Return the value stored for a key, or None when absent."""
        UpperCamelCase , UpperCamelCase : Union[str, Any] = self._locate_node(A_ )
        if node is not None:
            return node.value
        return None
def A_ ( ) -> None:
    """Insert four distinct keys and verify each value is retrievable.

    Fixes: `SkipList` is undefined in this file (the class above is `A__`),
    and the traversal discarded both the node advance and the value store into
    a throwaway local, which would loop forever.
    """
    skip_list = A__()
    skip_list.insert("Key1" , 3 )
    skip_list.insert("Key2" , 12 )
    skip_list.insert("Key3" , 41 )
    skip_list.insert("Key4" , -19 )
    node = skip_list.head
    all_values = {}
    # Walk the level-0 links collecting every key/value pair.
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def A_ ( ) -> None:
    """Re-inserting an existing key must overwrite its value.

    Fixes: undefined `SkipList` (the class above is `A__`) and the discarded
    traversal locals that made the while-loop infinite.
    """
    skip_list = A__()
    skip_list.insert("Key1" , 10 )
    skip_list.insert("Key1" , 12 )
    skip_list.insert("Key5" , 7 )
    skip_list.insert("Key7" , 10 )
    skip_list.insert("Key10" , 5 )
    skip_list.insert("Key7" , 7 )
    skip_list.insert("Key5" , 5 )
    skip_list.insert("Key10" , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def A_ ( ) -> None:
    """Searching an empty list returns None (fixes the undefined `SkipList` name)."""
    skip_list = A__()
    assert skip_list.find("Some key" ) is None
def A_ ( ) -> None:
    """Find returns current values, honours overwrites, and None for misses.

    Fix: `SkipList` is undefined in this file — the class above is `A__`.
    """
    skip_list = A__()
    skip_list.insert("Key2" , 20 )
    assert skip_list.find("Key2" ) == 20
    skip_list.insert("Some Key" , 10 )
    skip_list.insert("Key2" , 8 )
    skip_list.insert("V" , 13 )
    assert skip_list.find("Y" ) is None
    assert skip_list.find("Key2" ) == 8
    assert skip_list.find("Some Key" ) == 10
    assert skip_list.find("V" ) == 13
def A_ ( ) -> None:
    """Deleting from an empty list is a no-op (fixes the undefined `SkipList` name)."""
    skip_list = A__()
    skip_list.delete("Some key" )
    assert len(skip_list.head.forward ) == 0
def A_ ( ) -> None:
    """Deleted keys must no longer be found (fixes the undefined `SkipList` name)."""
    skip_list = A__()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("V" )
    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("Key2" ) is None
def A_ ( ) -> None:
    """Each delete removes exactly one key, leaving the others intact.

    Fix: `SkipList` is undefined in this file — the class above is `A__`.
    """
    skip_list = A__()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 14 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("V" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) == 14
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("X" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) == 12
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("Key1" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) == 15
    skip_list.delete("Key2" )
    assert skip_list.find("V" ) is None
    assert skip_list.find("X" ) is None
    assert skip_list.find("Key1" ) is None
    assert skip_list.find("Key2" ) is None
def A_ ( ) -> None:
    """After a delete, no forward link may still reach the removed node.

    Fixes: undefined `SkipList`, and the recursive traversal yielded/recursed
    on undefined names instead of its parameter.
    """
    skip_list = A__()
    skip_list.insert("Key1" , 12 )
    skip_list.insert("V" , 13 )
    skip_list.insert("X" , 142 )
    skip_list.insert("Key2" , 15 )
    skip_list.delete("X" )

    def traverse_keys(node ):
        # Yield this node's key, then every key reachable through any level.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )

    assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def A_ ( ) -> None:
    """Iteration yields keys in sorted order through inserts and deletes.

    Fixes: undefined `SkipList`, and the helper/loop bodies read undefined
    names (`lst`, the loop variable) instead of their parameters.
    """

    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )

    skip_list = A__()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def A_ ( ) -> Tuple:
    """Run the whole skip-list suite 100 times.

    NOTE(review): every helper called below (test_insert, test_search, ...) is
    undefined in this file — the tests above are each named `A_` and shadow
    one another.
    """
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def A_ ( ) -> None:
    """Demo: build a small skip list, delete a key, and print the diagram.

    Fixes: undefined `SkipList` (the class above is `A__`) and the undefined
    `_lowerCAmelCase` passed to print().
    """
    skip_list = A__()
    skip_list.insert(2 , "2" )
    skip_list.insert(4 , "4" )
    skip_list.insert(6 , "4" )
    skip_list.insert(4 , "5" )
    skip_list.insert(8 , "4" )
    skip_list.insert(9 , "4" )
    skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is undefined — the demo above is named A_.
    main()
| 38
| 0
|
from __future__ import annotations
def A_ ( n: int , k: int ) -> list[list[int]]:
    """Return all k-element combinations of 1..n in lexicographic order.

    Fixes: the original declared duplicate parameter names (SyntaxError) and
    delegated to the undefined `create_all_state`; the backtracking builder is
    inlined so the function stands alone.
    """
    result: list[list[int]] = []

    def _build(start: int , level: int , current: list[int] ) -> None:
        if level == 0:
            result.append(current[:] )  # snapshot — current is reused
            return
        # Leave room for the remaining `level` picks.
        for i in range(start , n - level + 2 ):
            current.append(i )
            _build(i + 1 , level - 1 , current )
            current.pop()

    _build(1 , k , [] )
    return result
def A_ ( increment: int , total_number: int , level: int , current_list: list[int] , total_list: list[list[int]] , ) -> None:
    """Append to `total_list` every `level`-element combination drawn from
    increment..total_number, extending `current_list` in place.

    Fixes: the original declared five identical parameter names (SyntaxError)
    and recursed via the undefined `create_all_state`; recursion now goes
    through a local helper so outer shadowing of `A_` cannot break it.
    """

    def _rec(start: int , lvl: int ) -> None:
        if lvl == 0:
            total_list.append(current_list[:] )
            return
        for i in range(start , total_number - lvl + 2 ):
            current_list.append(i )
            _rec(i + 1 , lvl - 1 )
            current_list.pop()

    _rec(increment , level )
def A_ ( total_list: list[list[int]] ) -> None:
    """Print each combination on its own line, space-separated.

    Fix: the original iterated the undefined module name `total_list` instead
    of its parameter.
    """
    for state in total_list:
        print(*state )
if __name__ == "__main__":
    # NOTE(review): mangled driver — the constants are bound to the throwaway
    # names __lowerCamelCase, then `generate_all_combinations`, `n`, `k`,
    # `print_all_state` and `total_list` are all undefined as written.
    __lowerCamelCase : Tuple = 4
    __lowerCamelCase : Any = 2
    __lowerCamelCase : List[str] = generate_all_combinations(n, k)
    print_all_state(total_list)
| 713
|
from PIL import Image
def A_ ( image ) -> "Image":
    """Binarize a greyscale PIL image in place around its mean pixel value.

    Pixels strictly above the mean become 255, the rest 0; the (mutated) image
    is returned.  Fixes: the original's parameter was never used — the body
    read the undefined name `image` and called range() on it.
    """
    height, width = image.size
    pixels = image.load()
    mean = 0
    # First pass: accumulate the integer mean grey level.
    for i in range(width ):
        for j in range(height ):
            mean += pixels[j, i]
    mean //= width * height
    # Second pass: threshold every pixel against the mean.
    for i in range(width ):
        for j in range(height ):
            pixels[j, i] = 255 if pixels[j, i] > mean else 0
    return image
if __name__ == "__main__":
    # NOTE(review): `mean_threshold` is undefined (the function above is A_),
    # the result is bound to the throwaway __lowerCamelCase, and `image` on the
    # next line is therefore undefined as written.
    __lowerCamelCase : Union[str, Any] = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
    image.save("""output_image_path""")
| 38
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class A__ ( __snake_case , __snake_case ):
    # NOTE(review): mechanically renamed MaskFormer-Swin backbone config.  The
    # __init__ signature declares every parameter as `A_` — duplicate arguments
    # are a SyntaxError — and every attribute assignment is discarded into the
    # throwaway local `UpperCamelCase`, even though lines below read embed_dim,
    # len(A_), self.stage_names, etc.  Confirm against the original config.
    _UpperCAmelCase :int = 'maskformer-swin'
    _UpperCAmelCase :Union[str, Any] = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , A_=224 , A_=4 , A_=3 , A_=96 , A_=[2, 2, 6, 2] , A_=[3, 6, 12, 24] , A_=7 , A_=4.0 , A_=True , A_=0.0 , A_=0.0 , A_=0.1 , A_="gelu" , A_=False , A_=0.02 , A_=1e-5 , A_=None , A_=None , **A_ , ):
        # Presumed parameters, in order: image_size, patch_size, num_channels,
        # embed_dim, depths, num_heads, window_size, mlp_ratio, qkv_bias,
        # hidden_dropout_prob, attention_probs_dropout_prob, drop_path_rate,
        # hidden_act, use_absolute_embeddings, initializer_range,
        # layer_norm_eps, out_features, out_indices.  Mutable list defaults
        # would be shared across instances if this ever ran.
        '''simple docstring'''
        super().__init__(**A_ )
        UpperCamelCase : Any = image_size
        UpperCamelCase : int = patch_size
        UpperCamelCase : Tuple = num_channels
        UpperCamelCase : str = embed_dim
        UpperCamelCase : Any = depths
        UpperCamelCase : str = len(A_ )
        UpperCamelCase : List[str] = num_heads
        UpperCamelCase : str = window_size
        UpperCamelCase : str = mlp_ratio
        UpperCamelCase : Optional[int] = qkv_bias
        UpperCamelCase : Union[str, Any] = hidden_dropout_prob
        UpperCamelCase : Any = attention_probs_dropout_prob
        UpperCamelCase : Dict = drop_path_rate
        UpperCamelCase : List[Any] = hidden_act
        UpperCamelCase : Tuple = use_absolute_embeddings
        UpperCamelCase : Dict = layer_norm_eps
        UpperCamelCase : Optional[int] = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        UpperCamelCase : List[Any] = int(embed_dim * 2 ** (len(A_ ) - 1) )
        UpperCamelCase : str = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(A_ ) + 1 )]
        UpperCamelCase : Optional[int] = get_aligned_output_features_output_indices(
            out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
| 714
|
from math import loga
def A_ ( a: int ) -> int:
    """Return the index of the lowest set bit of `a` (0 for a == 0).

    Raises ValueError for negative input and TypeError for non-ints.  Fixes:
    the body read the undefined name `a` (parameter was `_lowerCAmelCase`),
    the isinstance check was `isinstance(a, a)` and missing its `not`, and the
    module-level `from math import loga` names a function that doesn't exist.
    """
    from math import log2  # local import: the module-level `loga` import is broken

    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif not isinstance(a , int ):
        raise TypeError("Input value must be a 'int' type" )
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
    # Run any doctests in this module when executed directly.
    import doctest
    doctest.testmod()
| 0
|
def A_ ( separator: str , separated: list ) -> str:
    """Join the strings in `separated` with `separator`, stripping any leading
    or trailing separator from the result.

    Raises Exception when any element is not a string.  Fixes: the original
    declared both parameters as `_lowerCAmelCase` (duplicate-argument
    SyntaxError) and tested `isinstance(x, x)` instead of `isinstance(x, str)`.
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception("join() accepts only strings to be joined" )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
    # Run any doctests in this module when executed directly.
    from doctest import testmod
    testmod()
| 715
|
from __future__ import annotations
# NOTE(review): module metadata mangled by an automated renamer — all six
# lines rebind the same name ``__lowerCamelCase``, so only the last value
# ("Alpha") survives. They presumably were __author__, __license__,
# __version__, __maintainer__, __email__ and __status__ — TODO confirm.
# The ``Optional``/``Tuple``/... annotations reference typing names that are
# never imported; module-level annotations ARE evaluated, so these lines
# would raise NameError as written.
__lowerCamelCase : Optional[int] = """Muhammad Umer Farooq"""
__lowerCamelCase : Tuple = """MIT"""
__lowerCamelCase : Optional[int] = """1.0.0"""
__lowerCamelCase : int = """Muhammad Umer Farooq"""
__lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me"""
__lowerCamelCase : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class A__ ( __snake_case ):
    """HTML parser that records every absolute URL found in ``<a href=...>`` tags.

    NOTE(review): the base class ``__snake_case`` is not defined in this file;
    given the ``html.parser.HTMLParser`` import above it presumably stands for
    ``HTMLParser`` — confirm against the original script.

    Fixes over the original: ``__init__`` bound its state to a throwaway local
    (so ``self.urls`` / ``self.domain`` were never set), the tag handler had two
    parameters with the same mangled name (a SyntaxError), and it was named
    ``__UpperCamelCase`` instead of the ``handle_starttag`` hook that
    ``HTMLParser`` actually invokes.
    """

    def __init__(self, domain):
        super().__init__()
        self.urls: list[str] = []       # collected absolute links
        self.domain = domain            # base used to absolutize relative hrefs

    def handle_starttag(self, tag, attrs):
        """HTMLParser hook: collect the href target of every anchor tag."""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor just "#", keep it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls, absolutize and store it.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def A_ ( _lowerCAmelCase ) -> str:
    # Reduce a URL to its second-level domain: the last two dot-separated
    # labels of the host, e.g. "https://sub.example.com" -> "example.com".
    # NOTE(review): ``get_sub_domain_name`` is not defined under that name in
    # this file — the netloc helper just below was presumably renamed to
    # ``A_`` by the same automated mangling; confirm against the original.
    return ".".join(get_sub_domain_name(_lowerCAmelCase ).split("." )[-2:] )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the network-location part (``host[:port]``) of the given URL."""
    parsed_url = parse.urlparse(_lowerCAmelCase )
    return parsed_url.netloc
def A_ ( _lowerCAmelCase = "https://github.com" ) -> list[str]:
    # Fetch the start page, follow every link the parser collected, and return
    # a sorted, de-duplicated list of e-mail addresses on the target domain.
    # NOTE(review): automated renaming broke several references in this body —
    # ``get_domain_name`` / ``Parser`` are presumably the two helpers above
    # (both renamed); ``parser`` / ``r`` / ``read`` / ``emails`` /
    # ``valid_emails`` are the locals collapsed onto the throwaway
    # ``UpperCamelCase`` target; and the inner ``requests.get(_lowerCAmelCase)``
    # presumably fetched ``link``, not the start URL. As written this function
    # raises NameError; confirm all of this against the original script.
    UpperCamelCase : int = get_domain_name(_lowerCAmelCase )
    # Initialize the parser
    UpperCamelCase : str = Parser(_lowerCAmelCase )
    try:
        # Open URL
        UpperCamelCase : int = requests.get(_lowerCAmelCase )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        UpperCamelCase : Optional[Any] = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                UpperCamelCase : Optional[Any] = requests.get(_lowerCAmelCase )
                # Get the valid email.
                UpperCamelCase : Optional[int] = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(_lowerCAmelCase )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(_lowerCAmelCase )
if __name__ == "__main__":
    # NOTE(review): ``emails_from_url`` is not defined under that name in this
    # file (the crawler above was renamed ``A_``) and ``emails`` is never
    # bound (the result went to the mangled ``__lowerCamelCase``); as written
    # this guard raises NameError — confirm against the original script.
    __lowerCamelCase : Tuple = emails_from_url("""https://github.com""")
    print(f"""{len(emails)} emails found:""")
    print("""\n""".join(sorted(emails)))
| 38
| 0
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class A__ ( unittest.TestCase ):
    """Holds the hyper-parameters shared by the DPT image-processor tests and
    builds the kwargs dict handed to the image processor under test.

    Fixes over the original: every ``__init__`` parameter was mangled to the
    same name ``A_`` (a SyntaxError), values were bound to a throwaway local
    instead of instance attributes (so ``prepare_image_processor_dict`` could
    never read them), and the dict-builder was renamed from the
    ``prepare_image_processor_dict`` its callers use. Mutable list/dict
    defaults are materialized per instance.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class A__ ( __snake_case , unittest.TestCase ):
    """DPTImageProcessor tests: attribute presence, ``from_dict`` round-trips,
    and PIL / NumPy / PyTorch input handling.

    NOTE(review): this class was processed by an automated renamer — the mixin
    base ``__snake_case`` is presumably ``ImageProcessingSavingTestMixin``
    (imported above), every method was renamed to ``__UpperCamelCase``
    (originally presumably ``setUp`` / ``image_processor_dict`` / ``test_*``),
    locals were collapsed onto ``UpperCamelCase``, and boolean / object
    arguments became ``A_``. Confirm against the original test module.
    """
    _UpperCAmelCase :Optional[Any] = DPTImageProcessor if is_vision_available() else None
    # setUp: build the shared hyper-parameter helper.
    # NOTE(review): ``DPTImageProcessingTester`` is presumably the tester class
    # defined above (renamed ``A__``) — confirm.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Optional[int] = DPTImageProcessingTester(self )
    # kwargs dict delegated to the tester helper.
    @property
    def __UpperCamelCase( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    # the processor must expose all configured attributes.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A_ , "image_mean" ) )
        self.assertTrue(hasattr(A_ , "image_std" ) )
        self.assertTrue(hasattr(A_ , "do_normalize" ) )
        self.assertTrue(hasattr(A_ , "do_resize" ) )
        self.assertTrue(hasattr(A_ , "size" ) )
    # from_dict round-trip, including a scalar ``size`` override.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        UpperCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    # PIL input: single image and batch produce correctly shaped pixel_values.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
        for image in image_inputs:
            self.assertIsInstance(A_ , Image.Image )
        # Test not batched input
        UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        UpperCamelCase : Union[str, Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    # NumPy input: same shape checks as the PIL case.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : int = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
        for image in image_inputs:
            self.assertIsInstance(A_ , np.ndarray )
        # Test not batched input
        UpperCamelCase : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        UpperCamelCase : List[str] = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    # PyTorch input: same shape checks as the PIL case.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
        for image in image_inputs:
            self.assertIsInstance(A_ , torch.Tensor )
        # Test not batched input
        UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        UpperCamelCase : List[str] = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
| 716
|
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes strictly below *limit* (odd-only Sieve of Eratosthenes).

    Fixes over the original: the sieve indices and the ``primes`` accumulator
    were mangled into rebindings of one throwaway local (NameError at runtime),
    and the function is now restored under the ``prime_sieve`` name the solver
    below actually calls. Also handles ``limit < 3`` instead of crashing.
    """
    if limit < 3:
        return []
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    # mark multiples of each odd i; even composites are never inspected below
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index += i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def A_ ( _lowerCAmelCase = 100_0000 ) -> int:
    """Project Euler 50: the prime below *_lowerCAmelCase* that is the sum of the
    longest run of consecutive primes.

    Fixes over the original: ``length`` / ``largest`` / ``sol`` were never
    bound (mangled locals) and ``prime_sieve`` was undefined. Prefix sums
    replace the O(n) ``sum(primes[i:j])`` per candidate, and a set replaces the
    O(n) list membership test — same results, dramatically faster.
    """
    primes = prime_sieve(_lowerCAmelCase)
    prime_set = set(primes)
    # prefix[k] == sum(primes[:k]), so sum(primes[i:j]) == prefix[j] - prefix[i]
    prefix = [0]
    for p in primes:
        prefix.append(prefix[-1] + p)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = prefix[j] - prefix[i]
            if sol >= _lowerCAmelCase:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"""{A_() = }""")
| 38
| 0
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def A_ ( ):
    """Build a tiny model / optimizer / scheduler / dataloader bundle for the tests.

    Returns:
        tuple: ``(model, optimizer, scheduler, train_dataloader, valid_dataloader)``.

    Fixes over the original: every local was bound to a throwaway name (so the
    ``return`` raised NameError), ``OneCycleLR`` received an undefined argument
    instead of the optimizer, and the ``-> Dict`` annotation referenced a
    typing name that is never imported.
    """
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def A_ ( _lowerCAmelCase ) -> float:
    """Return a scalar fingerprint of a linear module's parameters
    (sum of absolute weight and bias values) — used to detect weight changes.

    Fixes over the original: the body referenced an undefined ``model`` name
    instead of the parameter, and the ``-> Tuple`` annotation referenced a
    typing name that is never imported.
    """
    return (_lowerCAmelCase.weight.abs().sum() + _lowerCAmelCase.bias.abs().sum()).item()
def A_ ( _lowerCAmelCase ) -> None:
    """Overwrite the given linear module's parameters with a freshly
    (randomly) initialized state dict of the same shape.

    Fixes over the original: the body referenced an undefined ``model`` name
    instead of the parameter, and the freshly built state dict was bound to a
    throwaway local while ``load_state_dict`` was fed the module itself.
    """
    # Linear(*weight.T.shape) reproduces (in_features, out_features)
    random_state = torch.nn.Linear(*tuple(_lowerCAmelCase.weight.T.shape)).state_dict()
    _lowerCAmelCase.load_state_dict(random_state)
class A__ ( __snake_case ):
    """Accelerator test-suite: shared state, ``prepare``, ``free_memory``,
    checkpoint save/load (with and without hooks) and bitsandbytes loading.

    NOTE(review): this class was processed by an automated renamer — the base
    ``__snake_case`` is presumably ``AccelerateTestCase`` (imported above),
    every method became ``__UpperCamelCase``, assignment targets were collapsed
    onto the throwaway local ``UpperCamelCase`` (so names such as
    ``accelerator`` / ``state`` / ``model`` read below are never bound), and
    most literal arguments (booleans, exception classes, paths) became ``A_``.
    As written many methods raise NameError; confirm each detail against the
    original accelerate test module before trusting the comments below.
    """
    # Accelerator on CUDA: shared-state flags, then cpu=True must raise.
    @require_cuda
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : str = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(A_ ):
            UpperCamelCase : Union[str, Any] = Accelerator(cpu=A_ )
    # GradientState accessors (num_steps / sync_gradients) and reset.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Optional[int] = Accelerator()
        UpperCamelCase : Dict = GradientState()
        assert state.num_steps == 1
        UpperCamelCase : Tuple = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        UpperCamelCase : Dict = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    # prepare() must register every prepared object on the accelerator.
    # NOTE(review): the parenthesized target below is a mangled tuple
    # unpacking — originally presumably
    # (prepared_model, prepared_optimizer, prepared_scheduler,
    #  prepared_train_dl, prepared_valid_dl).
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : str = Accelerator()
        UpperCamelCase : Tuple = create_components()
        (
            UpperCamelCase
        ) : Dict = accelerator.prepare(A_ , A_ , A_ , A_ , A_ )
        self.assertTrue(prepared_model in accelerator._models )
        self.assertTrue(prepared_optimizer in accelerator._optimizers )
        self.assertTrue(prepared_scheduler in accelerator._schedulers )
        self.assertTrue(prepared_train_dl in accelerator._dataloaders )
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    # free_memory() must drop every registered object.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : str = Accelerator()
        UpperCamelCase : Optional[Any] = create_components()
        accelerator.prepare(A_ , A_ , A_ , A_ , A_ )
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models ) == 0 )
        self.assertTrue(len(accelerator._optimizers ) == 0 )
        self.assertTrue(len(accelerator._schedulers ) == 0 )
        self.assertTrue(len(accelerator._dataloaders ) == 0 )
    # ACCELERATE_TORCH_DEVICE env var must select the accelerator device.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        PartialState._reset_state()
        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*A_ , **A_ ):
            pass
        with patch("torch.cuda.set_device" , A_ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
            UpperCamelCase : Dict = Accelerator()
        self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
    # save_state / load_state round-trip restores model weights.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : List[Any] = Accelerator()
        UpperCamelCase : int = create_components()
        accelerator.prepare(A_ , A_ , A_ , A_ , A_ )
        UpperCamelCase : Any = get_signature(A_ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(A_ )
            # make sure random weights don't match
            load_random_weights(A_ )
            self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1e-3 )
            # make sure loaded weights match
            accelerator.load_state(A_ )
            self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1e-3 )
    # registered pre-hooks must run on save/load, and removal must disable them.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Tuple = Accelerator()
        UpperCamelCase : Optional[int] = create_components()
        accelerator.prepare(A_ , A_ , A_ , A_ , A_ )
        UpperCamelCase : Any = get_signature(A_ )
        # saving hook
        def save_config(A_ , A_ , A_ ):
            UpperCamelCase : Dict = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(A_ , "data.json" ) , "w" ) as f:
                json.dump(A_ , A_ )
        # loading hook
        def load_config(A_ , A_ ):
            with open(os.path.join(A_ , "data.json" ) , "r" ) as f:
                UpperCamelCase : int = json.load(A_ )
            UpperCamelCase : List[str] = config["class_name"]
        UpperCamelCase : List[Any] = accelerator.register_save_state_pre_hook(A_ )
        UpperCamelCase : List[Any] = accelerator.register_load_state_pre_hook(A_ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(A_ )
            # make sure random weights don't match with hooks
            load_random_weights(A_ )
            self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            UpperCamelCase : Optional[Any] = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(A_ )
            self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1e-3 )
            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__ )
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(A_ )
            # make sure random weights don't match with hooks removed
            load_random_weights(A_ )
            self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            UpperCamelCase : int = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(A_ )
            self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1e-3 )
            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__ )
    # prepare() must pass None objects through untouched.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : str = Accelerator()
        UpperCamelCase : Optional[Any] = create_components()
        UpperCamelCase : Dict = None
        # This should work
        UpperCamelCase : Optional[Any] = accelerator.prepare(
            A_ , A_ , A_ , A_ , A_ , A_ )
        self.assertTrue(dummy_obj is None )
    # prepare() must tag every prepared object with _is_accelerate_prepared.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : List[str] = Accelerator()
        UpperCamelCase : Any = create_components()
        UpperCamelCase : int = [1, 2, 3]
        # This should work
        UpperCamelCase : List[Any] = accelerator.prepare(
            A_ , A_ , A_ , A_ , A_ , A_ )
        self.assertEqual(
            getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
        self.assertEqual(
            getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(A_ , "_is_accelerate_prepared" , A_ ) , A_ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
    # 8-bit model fully on GPU 0 can be prepared.
    @slow
    @require_bnb
    def __UpperCamelCase( self ):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM
        UpperCamelCase : Any = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=A_ , device_map={"": 0} , )
        UpperCamelCase : Any = Accelerator()
        # This should work
        UpperCamelCase : str = accelerator.prepare(A_ )
    # 8-bit model with CPU offload must be rejected by prepare().
    @slow
    @require_bnb
    def __UpperCamelCase( self ):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM
        UpperCamelCase : Optional[Any] = Accelerator()
        with init_empty_weights():
            UpperCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        UpperCamelCase : Tuple = infer_auto_device_map(A_ )
        UpperCamelCase : Tuple = "cpu"
        UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , device_map=A_ , load_in_abit=A_ , llm_inta_enable_fpaa_cpu_offload=A_ )
        # This should not work and get value error
        with self.assertRaises(A_ ):
            UpperCamelCase : Any = accelerator.prepare(A_ )
    # 8-bit model split across GPUs must be rejected under MULTI_GPU.
    @slow
    @require_bnb
    @require_multi_gpu
    def __UpperCamelCase( self ):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM
        UpperCamelCase : str = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        UpperCamelCase : Union[str, Any] = infer_auto_device_map(A_ )
        UpperCamelCase : int = 1
        UpperCamelCase : Any = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=A_ , device_map=A_ , )
        UpperCamelCase : List[Any] = Accelerator()
        # This should not work and get value error
        with self.assertRaises(A_ ):
            UpperCamelCase : int = accelerator.prepare(A_ )
        PartialState._reset_state()
    # 8-bit model on a single extra GPU can be prepared.
    @slow
    @require_bnb
    @require_multi_gpu
    def __UpperCamelCase( self ):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM
        with init_empty_weights():
            UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        UpperCamelCase : Optional[int] = infer_auto_device_map(A_ )
        UpperCamelCase : Any = 1
        UpperCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=A_ , device_map=A_ , )
        UpperCamelCase : List[str] = Accelerator()
        # This should work
        UpperCamelCase : Optional[Any] = accelerator.prepare(A_ )
    # prepare() of a plain model/optimizer pair on CUDA.
    @require_cuda
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : int = torch.nn.Linear(10 , 10 )
        UpperCamelCase : Tuple = torch.optim.SGD(model.parameters() , lr=0.01 )
        UpperCamelCase : Tuple = Accelerator(cpu=A_ )
        UpperCamelCase : str = accelerator.prepare(A_ )
| 717
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __snake_case ):
    """Input stream that materializes a dataset from a Python generator
    (wraps the packaged ``Generator`` builder).

    Fixes over the original: every ``__init__`` parameter was mangled to the
    same name ``A_`` (a SyntaxError), the builder was bound to a throwaway
    local although the reader uses ``self.builder``, and the reader was
    renamed from the ``read`` method the AbstractDatasetInputStream interface
    expects. Parameter names restored from the upstream
    ``GeneratorDatasetInputStream`` — confirm if callers use keywords.
    """

    def __init__(
        self,
        generator,
        features = None,
        cache_dir = None,
        keep_in_memory = False,
        streaming = False,
        gen_kwargs = None,
        num_proc = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset — streaming, or downloaded & prepared.

        NOTE(review): ``self.streaming`` / ``self.num_proc`` /
        ``self.keep_in_memory`` are presumably set by the base-class
        constructor — confirm against ``AbstractDatasetInputStream``.
        """
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 38
| 0
|
def fibonacci(n):
    """Return the n-th Fibonacci number (F(2) = 1, F(3) = 2, ..., F(12) = 144).

    Mirrors the original quirk of returning 0 for ``n == 1`` or non-int input.
    Fix over the original: the body referenced an undefined ``n`` while the
    parameter was mangled, the type test called ``isinstance(x, x)``, and the
    function is restored under the ``fibonacci`` name its caller uses.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci number with *n* digits.

    Fixes over the original: ``digits`` / ``index`` were never bound (mangled
    locals) and the recursion target ``fibonacci`` was undefined. The
    Fibonacci values are now generated incrementally instead of rebuilding the
    whole sequence on every iteration — identical results, O(n) additions.
    """
    digits = 0
    index = 2
    previous, current = 1, 1  # F(1), F(2) — the fibonacci(1) quirk never matters here
    while digits < n:
        index += 1
        previous, current = current, previous + current  # current == F(index)
        digits = len(str(current))
    return index


def A_ ( _lowerCAmelCase = 1000 ):
    """Project Euler 25 entry point: index of the first Fibonacci number with
    *_lowerCAmelCase* digits (default 1000)."""
    return fibonacci_digits_index(_lowerCAmelCase)


if __name__ == "__main__":
    print(A_(int(str(input()).strip())))
| 718
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A_ ( _lowerCAmelCase ):  # picklable for multiprocessing
    """Return the sum of the elements of *_lowerCAmelCase* (e.g. a NumPy array).

    Fixes over the original: the body referenced an undefined ``x`` instead of
    the parameter, and the return annotation referenced ``Union`` which is
    never imported (NameError at definition time).
    """
    return _lowerCAmelCase.sum()
def A_ ( _lowerCAmelCase ):  # picklable for multiprocessing
    """Return the successor of *_lowerCAmelCase*.

    Fixes over the original: the body referenced an undefined ``i`` instead of
    the parameter, and the return annotation referenced ``Optional`` which is
    never imported (NameError at definition time).
    """
    return _lowerCAmelCase + 1
@dataclass
class A__ :
    """Simple record used by the ``asdict`` tests below.

    Fix over the original: both fields were mangled to the same name
    ``_UpperCAmelCase`` (the second annotation silently replaced the first);
    the field names ``x``/``y`` are grounded in the ``A(x=…, y=…)`` usage in
    the asdict test further down.
    """
    x: int
    y: str
class A__ ( __snake_case ):
    """Tests for ``map_nested``, ``zip_dict`` and ``temporary_assignment``.

    NOTE(review): processed by an automated renamer — ``__snake_case`` is
    presumably ``TestCase`` (imported above), methods became
    ``__UpperCamelCase``, fixtures/expected values were collapsed onto the
    throwaway local ``UpperCamelCase`` and arguments onto ``A_`` (so the
    ``expected_map_nested_sna_int`` / ``Foo`` / ``foo`` names read below are
    never bound as written). Confirm against the original datasets test file.
    """
    # map_nested over scalars, lists, dicts and nested dicts — sequential and
    # with num_proc=2, plus the map_numpy variants and the unpicklable-lambda
    # failure case.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Optional[int] = {}
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : List[Any] = 1
        UpperCamelCase : Tuple = [1, 2]
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]}
        UpperCamelCase : Any = {"a": {"1": 1}, "b": 2}
        UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4}
        UpperCamelCase : Dict = {}
        UpperCamelCase : Any = []
        UpperCamelCase : Any = 2
        UpperCamelCase : Any = [2, 3]
        UpperCamelCase : Optional[Any] = {"a": 2, "b": 3}
        UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]}
        UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3}
        UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ ) , A_ )
        UpperCamelCase : List[str] = 2
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
        UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        UpperCamelCase : int = {"a": 2, "b": 0, "c": 2}
        UpperCamelCase : Union[str, Any] = {
            "a": np.eye(2 ).astype(A_ ),
            "b": np.zeros(3 ).astype(A_ ),
            "c": np.ones(2 ).astype(A_ ),
        }
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(A_ ): # can't pickle a local lambda
            map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )
    # zip_dict: zip values of several dicts sharing the same keys.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
        UpperCamelCase : List[Any] = {"a": 3, "b": 4}
        UpperCamelCase : Tuple = {"a": 5, "b": 6}
        UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
    # temporary_assignment: attribute is replaced inside the context and
    # restored afterwards.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        class A__ :
            _UpperCAmelCase :str = 'bar'
        UpperCamelCase : List[Any] = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(A_ , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def A_ ( iterable_length , num_proc , expected_num_proc ):
    """map_nested must stay single-process below ``parallel_min_length`` and cap
    ``num_proc`` at the iterable length otherwise.

    Fixes over the original: all three parameters were mangled to the same
    name (a SyntaxError — and pytest requires them to match the parametrize
    id string), the mapped dict/result were bound to throwaway locals, and
    the lambda's body referenced an undefined ``x``.
    """
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data = {F"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A__ ( __snake_case ):
    """``temp_seed`` must make TF / PyTorch / NumPy randomness reproducible
    inside the context manager and leave it random outside of it.

    NOTE(review): processed by an automated renamer — ``__snake_case`` is
    presumably ``TestCase``, methods became ``__UpperCamelCase``, the RNG
    outputs were collapsed onto the throwaway local ``UpperCamelCase`` and the
    assertions reference ``outa`` twice (presumably ``out1``/``out2``/``out3``
    originally, comparing the two seeded outputs and the unseeded one).
    Confirm against the original datasets test file.
    """
    # TensorFlow: two seeded runs equal, third (unseeded) run differs.
    @require_tf
    def __UpperCamelCase( self ):
        '''simple docstring'''
        import tensorflow as tf
        from tensorflow.keras import layers
        UpperCamelCase : int = layers.Dense(2 )
        def gen_random_output():
            UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) )
            return model(A_ ).numpy()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : List[Any] = gen_random_output()
        with temp_seed(42 , set_tensorflow=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        UpperCamelCase : Optional[int] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    # PyTorch: same structure as the TF case.
    @require_torch
    def __UpperCamelCase( self ):
        '''simple docstring'''
        import torch
        def gen_random_output():
            UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 )
            UpperCamelCase : Dict = torch.rand(1 , 3 )
            return model(A_ ).detach().numpy()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Dict = gen_random_output()
        with temp_seed(42 , set_pytorch=A_ ):
            UpperCamelCase : Optional[int] = gen_random_output()
        UpperCamelCase : List[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    # NumPy: same structure, no framework flag needed.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        with temp_seed(42 ):
            UpperCamelCase : Optional[Any] = gen_random_output()
        UpperCamelCase : Optional[Any] = gen_random_output()
        np.testing.assert_equal(A_ , A_ )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def A_ ( input_data ):
    """``NestedDataStructure`` must expose the wrapped value unchanged via
    ``.data``.

    Fixes over the original: the parameter was mangled (pytest requires it to
    be named ``input_data`` to match the parametrize id), the result was bound
    to a throwaway local while the assertion read ``output_data``, and the
    ``-> List[Any]`` annotation referenced a typing name never imported.
    """
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def A_ ( data , expected_output ):
    """``NestedDataStructure.flatten`` must flatten arbitrarily nested
    lists/dicts into a flat list of leaves.

    Fixes over the original: both parameters were mangled to the same name (a
    SyntaxError — pytest also requires them to match the parametrize ids), the
    result was bound to a throwaway local while the assertion read ``output``,
    and the ``-> Tuple`` annotation referenced a typing name never imported.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def A_ ( ) -> List[Any]:
    # ``asdict`` must serialize dataclasses recursively (bare, nested in dicts,
    # nested in lists) and reject non-dataclass inputs.
    # NOTE(review): ``A`` is presumably the mangled dataclass ``A__`` defined
    # above (fields ``x``/``y``); ``expected_output`` and the ``_lowerCAmelCase``
    # arguments were collapsed onto throwaway names by the renamer, and the
    # ``List`` in the return annotation is never imported (NameError at
    # definition time). Confirm against the original datasets test file.
    UpperCamelCase : str = A(x=1 , y="foobar" )
    UpperCamelCase : Tuple = {"x": 1, "y": "foobar"}
    assert asdict(_lowerCAmelCase ) == expected_output
    UpperCamelCase : List[str] = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    UpperCamelCase : Tuple = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(_lowerCAmelCase ) == expected_output
    with pytest.raises(_lowerCAmelCase ):
        asdict([1, A(x=10 , y="foo" )] )
def A_ ( _lowerCAmelCase ):
    """Split *_lowerCAmelCase* on whitespace (worker used by the iflatmap tests).

    Fixes over the original: the body referenced an undefined ``text`` instead
    of the parameter, and the ``-> Tuple`` annotation referenced a typing name
    that is never imported (NameError at definition time).
    """
    return _lowerCAmelCase.split()
def A_ ( _lowerCAmelCase ):
    """Yield ``(timestamp, _lowerCAmelCase)`` twice, two seconds apart — used to
    check that ``iflatmap_unordered`` yields items as soon as they are produced.

    Fixes over the original: the body referenced an undefined ``content``
    instead of the parameter, and the ``-> Dict`` annotation referenced a
    typing name that is never imported (NameError at definition time).
    """
    yield (time.time(), _lowerCAmelCase)
    time.sleep(2)
    yield (time.time(), _lowerCAmelCase)
def A_ ( ) -> str:
    # ``iflatmap_unordered`` must flatten worker results from both
    # ``multiprocessing`` and ``multiprocess`` pools, and yield each item as
    # soon as it is produced.
    # NOTE(review): the first argument of every ``iflatmap_unordered`` call was
    # mangled to ``_lowerCAmelCase`` — presumably the ``pool`` bound by each
    # ``with`` statement; ``_split_text`` and
    # ``_aseconds_generator_of_aitems_with_timing`` are presumably the two
    # helpers above (both renamed ``A_``); and ``out`` was collapsed onto a
    # throwaway local in the first two sections. As written this raises
    # NameError — confirm against the original datasets test file.
    with Pool(2 ) as pool:
        UpperCamelCase : List[str] = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(_lowerCAmelCase ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        UpperCamelCase : Dict = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(_lowerCAmelCase ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        UpperCamelCase : Any = []
        for yield_time, content in iflatmap_unordered(
            _lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(_lowerCAmelCase )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(_lowerCAmelCase ) == 4
| 38
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# NOTE(review): both module constants were mangled onto the same name
# ``__lowerCamelCase`` — presumably ``logger`` and
# ``RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`` originally — so only the archive
# map survives. The ``Optional``/``List`` annotations reference typing names
# that are never imported; module-level annotations ARE evaluated, so these
# lines would raise NameError as written.
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
# TODO: upload to AWS
__lowerCamelCase : List[str] = {
    """yjernite/retribert-base-uncased""": (
        """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
    ),
}
class A__ ( __snake_case ):
    """Configuration class for the RetriBERT retrieval model.

    Fixes over the original: every ``__init__`` parameter was mangled to the
    same name ``A_`` (a SyntaxError) and every value was bound to a throwaway
    local instead of an instance attribute. Parameter names/order are restored
    from the upstream ``RetriBertConfig`` — confirm there if callers use
    keywords. NOTE(review): ``__snake_case`` is presumably ``PretrainedConfig``
    (imported above) and the ``_UpperCAmelCase`` class attribute presumably was
    ``model_type``; both left as-is to match the rest of this file.
    """

    _UpperCAmelCase :Any = 'retribert'

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 719
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
    # Placeholder for pipelines that need the optional ``note_seq`` backend:
    # constructing it or calling its from_config/from_pretrained-style
    # classmethods raises a helpful "backend missing" error via
    # ``requires_backends``.
    # NOTE(review): the metaclass ``__snake_case`` is presumably
    # ``DummyObject`` (imported above) and the mangled ``__UpperCamelCase``
    # classmethods presumably were ``from_config`` / ``from_pretrained`` —
    # confirm against the original dummy-objects module.
    _UpperCAmelCase :Tuple = ['note_seq']
    def __init__( self , *A_ , **A_ ):
        '''simple docstring'''
        requires_backends(self , ["note_seq"] )
    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        '''simple docstring'''
        requires_backends(cls , ["note_seq"] )
    @classmethod
    def __UpperCamelCase( cls , *A_ , **A_ ):
        '''simple docstring'''
        requires_backends(cls , ["note_seq"] )
| 38
| 0
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class A__ ( __snake_case , unittest.TestCase ):
    # Output-value regression test for the plain 2-D down block.
    # NOTE(review): mangled names — ``__snake_case`` is presumably
    # ``UNetBlockTesterMixin`` (imported above) and ``DownBlockaD`` presumably
    # ``DownBlock2D`` from the star import; the expected slice is passed to the
    # mixin's ``test_output``. Confirm against the original diffusers tests.
    _UpperCAmelCase :List[str] = DownBlockaD # noqa F405
    _UpperCAmelCase :Dict = 'down'
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Union[str, Any] = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
        super().test_output(A_ )
class A__ ( __snake_case , unittest.TestCase ):
    # Output-value regression test for the resnet downsample block
    # (``ResnetDownsampleBlockaD`` — presumably ``ResnetDownsampleBlock2D``).
    _UpperCAmelCase :Any = ResnetDownsampleBlockaD # noqa F405
    _UpperCAmelCase :Any = 'down'
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : Union[str, Any] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
        super().test_output(A_ )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for AttnDownBlock2D."""

    block_class = AttnDownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for CrossAttnDownBlock2D."""

    block_class = CrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self ):
        """Add the cross-attention dimension to the mixin's common init args."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for SimpleCrossAttnDownBlock2D."""

    block_class = SimpleCrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self ):
        """Dummy input including encoder hidden states for cross-attention."""
        return super().get_dummy_input(include_encoder_hidden_states=True )

    def prepare_init_args_and_inputs_for_common(self ):
        """Add the cross-attention dimension to the mixin's common init args."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for SkipDownBlock2D."""

    block_class = SkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self ):
        """Dummy input including the skip sample this block consumes."""
        return super().get_dummy_input(include_skip_sample=True )

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for AttnSkipDownBlock2D."""

    block_class = AttnSkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self ):
        """Dummy input including the skip sample this block consumes."""
        return super().get_dummy_input(include_skip_sample=True )

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for DownEncoderBlock2D (no time embedding)."""

    block_class = DownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self ):
        """Encoder blocks take no time embedding."""
        return super().get_dummy_input(include_temb=False )

    def prepare_init_args_and_inputs_for_common(self ):
        """Minimal init args: channel counts only."""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for AttnDownEncoderBlock2D (no time embedding)."""

    block_class = AttnDownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self ):
        """Encoder blocks take no time embedding."""
        return super().get_dummy_input(include_temb=False )

    def prepare_init_args_and_inputs_for_common(self ):
        """Minimal init args: channel counts only."""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for UNetMidBlock2D."""

    block_class = UNetMidBlockaD  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self ):
        """Mid blocks need input channels and time-embedding channels."""
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for UNetMidBlock2DCrossAttn."""

    block_class = UNetMidBlockaDCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self ):
        """Add the cross-attention dimension to the mixin's common init args."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for UNetMidBlock2DSimpleCrossAttn."""

    block_class = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self ):
        """Dummy input including encoder hidden states for cross-attention."""
        return super().get_dummy_input(include_encoder_hidden_states=True )

    def prepare_init_args_and_inputs_for_common(self ):
        """Add the cross-attention dimension to the mixin's common init args."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for UpBlock2D."""

    block_class = UpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self ):
        """Up blocks also consume the down path's residual hidden states."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True )

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for ResnetUpsampleBlock2D."""

    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self ):
        """Up blocks also consume the down path's residual hidden states."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True )

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for CrossAttnUpBlock2D."""

    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self ):
        """Up blocks also consume the down path's residual hidden states."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True )

    def prepare_init_args_and_inputs_for_common(self ):
        """Add the cross-attention dimension to the mixin's common init args."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for SimpleCrossAttnUpBlock2D."""

    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self ):
        """Needs both residual hidden states and encoder hidden states."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True , include_encoder_hidden_states=True )

    def prepare_init_args_and_inputs_for_common(self ):
        """Add the cross-attention dimension to the mixin's common init args."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for AttnUpBlock2D."""

    block_class = AttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self ):
        """Up blocks also consume the down path's residual hidden states."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True )

    @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for SkipUpBlock2D."""

    block_class = SkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self ):
        """Up blocks also consume the down path's residual hidden states."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True )

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for AttnSkipUpBlock2D."""

    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self ):
        """Up blocks also consume the down path's residual hidden states."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True )

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for UpDecoderBlock2D (no time embedding)."""

    block_class = UpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self ):
        """Decoder blocks take no time embedding."""
        return super().get_dummy_input(include_temb=False )

    def prepare_init_args_and_inputs_for_common(self ):
        """Minimal init args: channel counts only."""
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
        super().test_output(expected_slice )
class A__ ( __snake_case , unittest.TestCase ):
    """Output regression test for AttnUpDecoderBlock2D (no time embedding)."""

    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self ):
        """Decoder blocks take no time embedding."""
        return super().get_dummy_input(include_temb=False )

    def prepare_init_args_and_inputs_for_common(self ):
        """Minimal init args: channel counts only."""
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self ):
        """Check the forward output against a frozen reference slice."""
        expected_slice = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(expected_slice )
| 720
|
import math
import tensorflow as tf
from packaging import version
def A_ ( _lowerCAmelCase ) -> Any:
    """Exact GELU: x * Phi(x), with Phi computed via the Gaussian error function.

    Fix: the intermediate results were bound to a mangled name while the
    expressions below read `x`/`cdf`, raising NameError.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def A_ ( _lowerCAmelCase ) -> Dict:
    """Tanh-approximated GELU (the 'gelu_new' variant used by GPT-2).

    Fix: restores the local names (`x`, `pi`, `coeff`, `cdf`) that the body
    reads but that were lost to mangled assignments.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044_715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Mish activation: x * tanh(softplus(x)).

    Fix: the converted tensor was bound to a mangled name while the return
    statement reads `x`, raising NameError.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    return x * tf.tanh(tf.math.softplus(x ) )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Fast tanh-based GELU approximation:
    0.5 * x * (1 + tanh(x * 0.7978845608 * (1 + 0.044715 * x * x))).

    Fix: the two coefficients were both bound to a mangled name; restore
    distinct locals so the formula uses both constants.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeffa = tf.cast(0.044_715 , x.dtype )
    coeffb = tf.cast(0.7_978_845_608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Quick GELU: x * sigmoid(1.702 * x).

    Fix: restores the `x` and `coeff` locals the return expression reads.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """GELU with outputs clipped to [-10, 10] (quantization-friendly).

    NOTE(review): `_gelu` is not defined under that name in this file — the
    exact-GELU helper above presumably carried it originally; verify before
    running.
    """
    return tf.clip_by_value(_gelu(_lowerCAmelCase ) , -10 , 10 )
def A_ ( _lowerCAmelCase , axis=-1 ) -> str:
    """Gated Linear Unit: split the input in two along `axis` and gate the
    first half with the sigmoid of the second.

    Fix: both parameters were named `_lowerCAmelCase` (a SyntaxError) and the
    split halves were bound to a mangled name; restore `axis`, `a`, `b`.
    """
    a , b = tf.split(_lowerCAmelCase , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
    # TF >= 2.4 ships a fused Keras GELU kernel; prefer it.
    def A_ ( _lowerCAmelCase ) -> Any:
        """Keras GELU with the tanh approximation enabled."""
        # Fix: `approximate` is a boolean flag, not the input tensor.
        return tf.keras.activations.gelu(_lowerCAmelCase , approximate=True )
    __lowerCamelCase : Optional[int] = tf.keras.activations.gelu
    # NOTE(review): `approximate_gelu_wrap` presumably refers to the wrapper
    # defined just above (its original name was lost) — verify.
    __lowerCamelCase : int = approximate_gelu_wrap
else:
    # NOTE(review): `_gelu` / `_gelu_new` presumably refer to the manual
    # implementations defined earlier in this file — verify.
    __lowerCamelCase : List[Any] = _gelu
    __lowerCamelCase : Optional[Any] = _gelu_new
# String-name -> activation-callable registry.
# NOTE(review): `gelu`, `gelu_aa`, `gelu_fast`, `gelu_new`, `glu`, `mish` and
# `quick_gelu` are not bound under those names in this file (the defs above
# are all named `A_`); they presumably carried these names originally.
__lowerCamelCase : Any = {
    """gelu""": gelu,
    """gelu_10""": gelu_aa,
    """gelu_fast""": gelu_fast,
    """gelu_new""": gelu_new,
    """glu""": glu,
    """mish""": mish,
    """quick_gelu""": quick_gelu,
    """relu""": tf.keras.activations.relu,
    """sigmoid""": tf.keras.activations.sigmoid,
    """silu""": tf.keras.activations.swish,
    """swish""": tf.keras.activations.swish,
    """tanh""": tf.keras.activations.tanh,
}
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Look an activation function up by name in the ACT2FN registry.

    Raises:
        KeyError: if the name is not registered.

    Fix: the body read `activation_string`, which is not bound anywhere —
    the parameter itself is the activation name.
    """
    if _lowerCAmelCase in ACTaFN:
        return ACTaFN[_lowerCAmelCase]
    else:
        raise KeyError(F"""function {_lowerCAmelCase} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
| 38
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class A__ ( __snake_case ):
    """Output of the Karras-Ve scheduler's step methods.

    Fix: all three fields were named `_UpperCAmelCase`, so the later ones
    clobbered the earlier ones; names are restored to match the keyword
    arguments used when this class is constructed below
    (`prev_sample=`, `derivative=`, `pred_original_sample=`).
    """

    # Denoised sample to feed into the next solver step.
    prev_sample: torch.FloatTensor
    # Estimated derivative d(sample)/d(sigma) at the current noise level.
    derivative: torch.FloatTensor
    # Model's prediction of the fully-denoised sample, when available.
    pred_original_sample: Optional[torch.FloatTensor] = None
class A__ ( __snake_case , __snake_case ):
    """Stochastic sampler from Karras et al. (2022), Algorithm 2
    (variance-expanding formulation).

    Fix: the original `__init__` declared six parameters all named `A_`
    (a SyntaxError) and every method was named `__UpperCamelCase` (each def
    shadowing the previous one). Names are restored to match the
    `self.config.*` reads inside the step methods.
    """

    # step() consumes one model output; step_correct() a second — 2nd order.
    order = 2

    @register_to_config
    def __init__( self , sigma_min = 0.02 , sigma_max = 100 , s_noise = 1.0_07 , s_churn = 80 , s_min = 0.05 , s_max = 50 , ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input( self , sample , timestep = None ):
        """No-op scaling, kept for cross-scheduler API compatibility."""
        return sample

    def set_timesteps( self , num_inference_steps , device = None ):
        """Precompute descending timestep indices and the sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        # Geometric interpolation from sigma_max down to sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.floataa , device=device )

    def add_noise_to_input( self , sample , sigma , generator = None ):
        """Raise the sample to a higher noise level sigma_hat (Alg. 2, l. 5-6)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step( self , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ):
        """Euler step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        # NOTE(review): `KarrasVeOutput` is the output dataclass defined above
        # (renamed by obfuscation) — verify the name resolves.
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def step_correct( self , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ):
        """Second-order correction step (Alg. 2, lines 9-13)."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def add_noise( self , original_samples , noise , timesteps ):
        """Training-time noising is not supported by this scheduler."""
        raise NotImplementedError()
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
    # Fast (CPU-sized) test suite for the Kandinsky 2.2 decoder pipeline.
    # NOTE(review): obfuscation stripped the names that the mangled
    # assignments and the bare `A_` references originally bound; the class
    # will not run as written. Docstrings describe apparent intent only.
    _UpperCAmelCase :str = KandinskyVaaPipeline
    _UpperCAmelCase :str = [
        'image_embeds',
        'negative_image_embeds',
    ]
    _UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds']
    _UpperCAmelCase :List[str] = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    _UpperCAmelCase :List[str] = False
    @property
    def __UpperCamelCase( self ):
        '''Presumably the text-embedder hidden size (32); original name lost.'''
        return 32
    @property
    def __UpperCamelCase( self ):
        '''Presumably the time-input dimension (32); original name lost.'''
        return 32
    @property
    def __UpperCamelCase( self ):
        '''Presumably the first UNet block's output channel count.'''
        return self.time_input_dim
    @property
    def __UpperCamelCase( self ):
        '''Presumably the time-embedding dimension (4x the time input dim).'''
        return self.time_input_dim * 4
    @property
    def __UpperCamelCase( self ):
        '''Presumably the cross-attention dimension (100).'''
        return 100
    @property
    def __UpperCamelCase( self ):
        '''Build a tiny image-conditioned UNet2DConditionModel with a fixed seed.'''
        torch.manual_seed(0 )
        UpperCamelCase : List[str] = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        UpperCamelCase : Dict = UNetaDConditionModel(**A_ )
        return model
    @property
    def __UpperCamelCase( self ):
        '''Constructor kwargs for the tiny VQ image decoder ("movq").'''
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def __UpperCamelCase( self ):
        '''Build the tiny VQModel decoder with a fixed seed.'''
        torch.manual_seed(0 )
        UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
        return model
    def __UpperCamelCase( self ):
        '''Assemble the pipeline components: unet, DDIM scheduler, movq.'''
        UpperCamelCase : Tuple = self.dummy_unet
        UpperCamelCase : Optional[Any] = self.dummy_movq
        UpperCamelCase : Dict = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
        UpperCamelCase : Tuple = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    # NOTE(review): the duplicate parameter name `A_` below is a SyntaxError;
    # the second parameter is presumably `seed` (see `seed + 1` in the body).
    def __UpperCamelCase( self , A_ , A_=0 ):
        '''Deterministic dummy pipeline inputs for a given device and seed.'''
        UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
        UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            A_ )
        if str(A_ ).startswith("mps" ):
            # MPS does not support device-local generators.
            UpperCamelCase : Optional[Any] = torch.manual_seed(A_ )
        else:
            UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
        UpperCamelCase : Optional[int] = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def __UpperCamelCase( self ):
        '''Run the pipeline on CPU and compare output slices to frozen references.'''
        UpperCamelCase : Optional[Any] = "cpu"
        UpperCamelCase : List[str] = self.get_dummy_components()
        UpperCamelCase : Tuple = self.pipeline_class(**A_ )
        UpperCamelCase : List[str] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) )
        UpperCamelCase : Optional[int] = output.images
        # Same call again, but requesting a plain tuple instead of the
        # output dataclass; both paths must agree.
        UpperCamelCase : int = pipe(
            **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
        UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
        UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase : int = np.array(
            [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    # GPU integration test for the full Kandinsky 2.2 prior + decoder stack.
    # NOTE(review): method names and several local bindings were lost in
    # obfuscation (`A_` is never defined); docstrings describe intent only.
    def __UpperCamelCase( self ):
        '''Presumably tearDown: free Python and CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCamelCase( self ):
        '''End-to-end text-to-image run compared to a stored reference image.'''
        UpperCamelCase : Dict = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(A_ )
        UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
        UpperCamelCase : Tuple = pipeline.to(A_ )
        pipeline.set_progress_bar_config(disable=A_ )
        UpperCamelCase : str = "red cat, 4k photo"
        UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 )
        # Prior: turn the prompt into image embeddings.
        UpperCamelCase , UpperCamelCase : Tuple = pipe_prior(
            A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 )
        # Decoder: turn the embeddings into a 512x512 image.
        UpperCamelCase : Tuple = pipeline(
            image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , )
        UpperCamelCase : Union[str, Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(A_ , A_ )
| 38
| 0
|
def A_ ( a , b ) -> str:
    """Return the bitwise XOR of two non-negative integers as a binary string.

    Both operands are zero-padded to the wider one's length, so the result
    keeps leading zeros (e.g. 2 ^ 3 -> "0b01").

    Raises:
        ValueError: if either input is negative.

    Fix: both parameters were named `_lowerCAmelCase` (a SyntaxError) and the
    body read locals (`a`, `b`, `a_binary`, `b_binary`, `max_len`) that were
    never bound; names restored.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 700
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A_ ( ):
    """Parse CLI arguments for the TPU launcher: `--num_cores`, the training
    script path, and everything after it (forwarded verbatim).

    Returns an `argparse.Namespace`.

    Fix: `type=` and `nargs=` referenced `_lowerCAmelCase`, which is not
    bound anywhere in this zero-argument function; restored to `int`, `str`
    and `REMAINDER`.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program: swallow everything after the script path
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def A_ ( ):
    """Launcher entry point: import the training script as a module and spawn
    `num_cores` XLA processes running its `_mp_fn`.

    Fix: every local binding had been mangled away (the body read `args`,
    `script_fpath`, `mod_name`, `mod` without defining them) and the
    `sys.argv` patch had lost its target.

    NOTE(review): `parse_args` is not defined under that name in this file —
    the argument-parsing helper above presumably carried it; verify.
    """
    args = parse_args()
    # Import training_script as a module so xmp can pickle its _mp_fn.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees the arguments it expects.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    # Fix: the guard called an undefined `main()`; the entry point above is
    # bound to `A_`.
    A_()
| 38
| 0
|
import os
from math import logaa
def A_ ( _lowerCAmelCase = "base_exp.txt" ):
    """Project Euler 99: return the 1-based line number of the `base,exponent`
    pair with the greatest value, comparing via exponent * log10(base).

    Fixes: the data file is resolved relative to this source file (the code
    took `dirname()` of the filename itself, which is always ""); the line is
    parsed with `int` (it mapped the filename over the fields); the file is
    closed via a context manager.
    """
    largest: float = 0
    result = 0
    with open(os.path.join(os.path.dirname(__file__ ) , _lowerCAmelCase ) ) as data_file:
        for i, line in enumerate(data_file ):
            a , x = list(map(int , line.split("," ) ) )
            if x * logaa(a ) > largest:
                largest = x * logaa(a )
                result = i + 1
    return result
if __name__ == "__main__":
    # Fix: the guard called an undefined `solution()`; the solver above is
    # bound to `A_`.
    print(A_())
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import table: module file -> public names it exports.
# NOTE(review): the conditional assignments below were presumably
# `_import_structure[...] = [...]` updates before obfuscation renamed their
# targets to `__lowerCamelCase`; verify before relying on lazy loading.
__lowerCamelCase : Union[str, Any] = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
# Register each framework's model class only when that backend is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Dict = ["""VisionEncoderDecoderModel"""]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[str] = ["""TFVisionEncoderDecoderModel"""]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : int = ["""FlaxVisionEncoderDecoderModel"""]
# Static type checkers see the real imports; at runtime the module is
# replaced by a _LazyModule that imports submodules on first access.
if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys
    __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0
|
from __future__ import annotations
class A__ :
    """N-th order IIR digital filter in direct form I.

    Coefficients follow the convention
        a[0]*y[n] = b[0]*x[n] + ... + b[k]*x[n-k] - a[1]*y[n-1] - ... - a[k]*y[n-k].

    Fixes: `set_coefficients` declared two parameters both named `A_`
    (a SyntaxError) and read names that were never bound; `process` lost the
    slice-assignments that shift the input/output histories.
    """

    def __init__( self , order: int ) -> None:
        self.order = order
        # a_{0} ... a_{k} — default to an identity (pass-through) filter.
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients( self , a_coeffs , b_coeffs ) -> None:
        """Install new feedback (a) and feedforward (b) coefficients.

        A missing leading a_0 is assumed to be 1.0. Both lists must end up
        with exactly `order + 1` entries.

        Raises:
            ValueError: if either list has the wrong length.
        """
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            raise ValueError(
                F"""Expected a_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(a_coeffs )}"""
            )
        if len(b_coeffs ) != self.order + 1:
            raise ValueError(
                F"""Expected b_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(b_coeffs )}"""
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process( self , sample: float ) -> float:
        """Filter one input sample and return the output sample."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift histories right by one and record the newest sample/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 702
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class A__ ( unittest.TestCase ):
    """Configuration holder for the Vivit image-processor tests.

    Fixes: the `size`/`crop_size` defaulting expressions read names that the
    mangled assignments never bound, and the dict-builder method is renamed
    to `prepare_image_processor_dict`, the name its caller below uses.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ):
        # Default size/crop_size when the caller passes None.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self ):
        """Return the kwargs used to construct the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
# Test suite for the Vivit video image processor: property presence,
# from_dict overrides, and batched/unbatched processing for PIL, numpy
# and torch video inputs.
# NOTE(review): identifiers look machine-mangled — assignments bind
# `UpperCamelCase` while later lines read other names (e.g.
# `self.image_processor_tester`, `image_processing`, `A_`); verify
# against the upstream source before relying on this file.
@require_torch
@require_vision
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :List[str] = VivitImageProcessor if is_vision_available() else None
def __UpperCamelCase( self ):
'''Create the tester that supplies the image-processor configuration.'''
UpperCamelCase : List[Any] = VivitImageProcessingTester(self )
@property
def __UpperCamelCase( self ):
'''Kwargs dict for instantiating the processor under test.'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase( self ):
'''The processor should expose all standard configuration attributes.'''
UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , "image_mean" ) )
self.assertTrue(hasattr(A_ , "image_std" ) )
self.assertTrue(hasattr(A_ , "do_normalize" ) )
self.assertTrue(hasattr(A_ , "do_resize" ) )
self.assertTrue(hasattr(A_ , "do_center_crop" ) )
self.assertTrue(hasattr(A_ , "size" ) )
def __UpperCamelCase( self ):
'''from_dict should honour defaults and explicit size/crop_size overrides.'''
UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __UpperCamelCase( self ):
'''Batched and unbatched processing of PIL-image videos.'''
UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
UpperCamelCase : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase : str = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCamelCase( self ):
'''Batched and unbatched processing of numpy-array videos.'''
UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCamelCase( self ):
'''Batched and unbatched processing of torch-tensor videos.'''
UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 38
| 0
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def A_(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Simulated annealing over a 2-D search problem.

    Fixes two defects in the previous version: the signature declared every
    parameter with the same name (a SyntaxError), and each assignment bound a
    throwaway variable while later lines read the intended names
    (``search_end``, ``current_state``, ...), raising NameError.

    Args:
        search_prob: start state; must expose ``score()``, ``get_neighbors()``
            and ``x``/``y`` coordinates.
        find_max: maximize the score when True, minimize when False.
        max_x, min_x, max_y, min_y: bounds; neighbors outside are skipped.
        visualization: plot score-per-iteration with matplotlib when True.
        start_temperate: initial annealing temperature.
        rate_of_decrease: fractional temperature decay per iteration.
        threshold_temp: stop once the temperature falls below this value.

    Returns:
        The best state encountered (by the ``score() >`` comparison below).
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        # NOTE(review): best-state tracking always keeps the *highest* score,
        # matching the visible original logic — confirm this is intended for
        # find_max=False callers.
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                # Accept a worsening move with Boltzmann probability e^(dE/T).
                probability = (math.e) ** (change / current_temp)
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
# Demo / smoke tests for the simulated-annealing search defined above.
# NOTE(review): names look machine-mangled — the code below reads
# `test_fa`, `prob`, `local_min` and `simulated_annealing`, but the
# definitions bind `A_` and `__lowerCamelCase`; as written this raises
# NameError. Verify against the upstream script.
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
# Objective: f(x, y) = x^2 + y^2 (global minimum at the origin).
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__lowerCamelCase : Optional[int] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : Union[str, Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
__lowerCamelCase : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : List[Any] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
# Second objective: f(x, y) = 3x^2 - 6y.
return (3 * x**2) - (6 * y)
__lowerCamelCase : int = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : int = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f"""{local_min.score()}"""
)
__lowerCamelCase : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : int = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f"""{local_min.score()}"""
)
| 703
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__lowerCamelCase : Dict = logging.get_logger(__name__)
# File names expected inside a pretrained tokenizer repository.
__lowerCamelCase : Union[str, Any] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
# Download URL for each resource of the published 90M checkpoint.
__lowerCamelCase : Dict = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
# Maximum model input length (positional-embedding size) per checkpoint.
__lowerCamelCase : Tuple = {
"""facebook/blenderbot_small-90M""": 512,
}
# Fast (Rust-backed) tokenizer for BlenderbotSmall, built on byte-level BPE.
# NOTE(review): identifiers look machine-mangled — `__init__` declares every
# keyword parameter as `A_` (duplicate names) and the methods read
# `add_prefix_space` / `output` / `token_ids_a` that the mangled
# assignments never bind; verify against the upstream source.
class A__ ( __snake_case ):
_UpperCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES
_UpperCAmelCase :Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :Optional[Any] = BlenderbotSmallTokenizer
def __init__( self , A_=None , A_=None , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|endoftext|>" , A_=False , A_=True , **A_ , ):
'''Build the fast tokenizer around a ByteLevelBPETokenizer backend.'''
super().__init__(
ByteLevelBPETokenizer(
vocab=A_ , merges=A_ , add_prefix_space=A_ , trim_offsets=A_ , ) , bos_token=A_ , eos_token=A_ , unk_token=A_ , **A_ , )
UpperCamelCase : Union[str, Any] = add_prefix_space
def __UpperCamelCase( self , A_ , A_=None ):
'''Wrap one or two sequences with BOS/EOS special tokens.'''
UpperCamelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCamelCase( self , A_ , A_ = None ):
'''Return an all-zero token-type-id mask (this model does not use them).'''
UpperCamelCase : Tuple = [self.sep_token_id]
UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 38
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch/cudnn ops deterministic so pixel-level assertions are stable.
enable_full_determinism()
# Fast tests for the Kandinsky 2.2 text-to-image decoder pipeline with
# tiny dummy UNet/VQ models.
# NOTE(review): identifiers look machine-mangled — assignments bind
# `UpperCamelCase` while later lines read other names (`unet`, `movq`,
# `image`, ...), and `A_` is used without a visible binding; verify
# against the upstream diffusers test.
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :str = KandinskyVaaPipeline
_UpperCAmelCase :str = [
'image_embeds',
'negative_image_embeds',
]
_UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds']
_UpperCAmelCase :List[str] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase :List[str] = False
@property
def __UpperCamelCase( self ):
'''Dummy embedding width for the test models.'''
return 32
@property
def __UpperCamelCase( self ):
'''Dummy time-embedding input dimension.'''
return 32
@property
def __UpperCamelCase( self ):
'''Derived dummy dimension (equal to the time input dim).'''
return self.time_input_dim
@property
def __UpperCamelCase( self ):
'''Derived dummy dimension (4x the time input dim).'''
return self.time_input_dim * 4
@property
def __UpperCamelCase( self ):
'''Dummy sequence length / config constant for the tests.'''
return 100
@property
def __UpperCamelCase( self ):
'''Tiny UNet2DConditionModel with image-projection conditioning.'''
torch.manual_seed(0 )
UpperCamelCase : List[str] = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase : Dict = UNetaDConditionModel(**A_ )
return model
@property
def __UpperCamelCase( self ):
'''Kwargs for the tiny VQ (movq) decoder model.'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase( self ):
'''Instantiate the tiny VQModel with a fixed seed.'''
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase( self ):
'''Assemble unet/scheduler/movq components for the pipeline.'''
UpperCamelCase : Tuple = self.dummy_unet
UpperCamelCase : Optional[Any] = self.dummy_movq
UpperCamelCase : Dict = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
UpperCamelCase : Tuple = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCamelCase( self , A_ , A_=0 ):
'''Deterministic dummy pipeline inputs for a given device and seed.'''
UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
# mps has no torch.Generator(device=...) support, so seed globally there.
if str(A_ ).startswith("mps" ):
UpperCamelCase : Optional[Any] = torch.manual_seed(A_ )
else:
UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase : Optional[int] = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def __UpperCamelCase( self ):
'''End-to-end CPU run: output shape and a pinned pixel slice.'''
UpperCamelCase : Optional[Any] = "cpu"
UpperCamelCase : List[str] = self.get_dummy_components()
UpperCamelCase : Tuple = self.pipeline_class(**A_ )
UpperCamelCase : List[str] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) )
UpperCamelCase : Optional[int] = output.images
UpperCamelCase : int = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase : int = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
# Slow GPU integration test: full prior + decoder run against a reference
# image.
# NOTE(review): method names are mangled (`__UpperCamelCase` calling
# `super().tearDown()`), and reads like `pipe_prior`/`pipeline` do not
# match the mangled assignment targets; verify upstream.
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase( self ):
'''Free GPU memory between tests.'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase( self ):
'''Generate a 512x512 cat image and compare to the stored reference.'''
UpperCamelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCamelCase : Tuple = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase : str = "red cat, 4k photo"
UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase : Tuple = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase : Tuple = pipeline(
image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(A_ , A_ )
| 704
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the ConvBERT package: maps submodule name to
# the public symbols it exports; optional backends (tokenizers, torch,
# tf) are added only when importable.
__lowerCamelCase : int = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
# Under type checkers import everything eagerly so symbols resolve.
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
# At runtime, replace the module with a lazy loader.
# NOTE(review): upstream assigns the _LazyModule into sys.modules[__name__];
# the mangled binding here goes to a throwaway name — verify upstream.
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 38
| 0
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__lowerCamelCase : Dict = logging.get_logger(__name__)
# File names expected inside a pretrained tokenizer repository.
__lowerCamelCase : Union[str, Any] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
# Download URL for each resource of the published 90M checkpoint.
__lowerCamelCase : Dict = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
# Maximum model input length (positional-embedding size) per checkpoint.
__lowerCamelCase : Tuple = {
"""facebook/blenderbot_small-90M""": 512,
}
# Fast (Rust-backed) tokenizer for BlenderbotSmall, built on byte-level BPE.
# NOTE(review): identifiers look machine-mangled — `__init__` declares every
# keyword parameter as `A_` (duplicate names) and the methods read
# `add_prefix_space` / `output` / `token_ids_a` that the mangled
# assignments never bind; verify against the upstream source.
class A__ ( __snake_case ):
_UpperCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES
_UpperCAmelCase :Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :Optional[Any] = BlenderbotSmallTokenizer
def __init__( self , A_=None , A_=None , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|endoftext|>" , A_=False , A_=True , **A_ , ):
'''Build the fast tokenizer around a ByteLevelBPETokenizer backend.'''
super().__init__(
ByteLevelBPETokenizer(
vocab=A_ , merges=A_ , add_prefix_space=A_ , trim_offsets=A_ , ) , bos_token=A_ , eos_token=A_ , unk_token=A_ , **A_ , )
UpperCamelCase : Union[str, Any] = add_prefix_space
def __UpperCamelCase( self , A_ , A_=None ):
'''Wrap one or two sequences with BOS/EOS special tokens.'''
UpperCamelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCamelCase( self , A_ , A_ = None ):
'''Return an all-zero token-type-id mask (this model does not use them).'''
UpperCamelCase : Tuple = [self.sep_token_id]
UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 705
|
import logging
import os
import threading
import time
# Optional platform modules: fall back to None when unavailable so the
# bottom of the file can pick the right lock implementation.
# NOTE(review): the fallback assignments bind mangled `__lowerCamelCase`
# names rather than `warnings`/`msvcrt`/`fcntl`; verify upstream.
try:
import warnings
except ImportError:
__lowerCamelCase : str = None
try:
import msvcrt
except ImportError:
__lowerCamelCase : str = None
try:
import fcntl
except ImportError:
__lowerCamelCase : List[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
# Python 2 had no TimeoutError; alias it to OSError.
__lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
# Public API, package version, and the lazily created module logger.
__lowerCamelCase : str = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
__lowerCamelCase : Union[str, Any] = """3.0.12"""
__lowerCamelCase : Any = None
def A_() -> logging.Logger:
    """Return the module-level logger, creating it lazily on first use.

    Fixes the previous version, which bound the new logger to a throwaway
    variable and then returned the still-unset global — yielding ``None``
    on the first call.
    """
    global _logger
    # Cache in the module global so every caller shares one logger.
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class A__(__snake_case):
    """Raised when a file lock could not be acquired within the timeout.

    Fixes the previous version, which never stored the lock-file path on
    the instance and returned an undefined name from ``__str__``.
    """

    def __init__(self, A_):
        # Path of the lock file that could not be acquired.
        self.lock_file = A_
        return None

    def __str__(self):
        return f"The file lock '{self.lock_file}' could not be acquired."
class A__:
    """Context-manager proxy returned by ``acquire()``.

    Entering yields the underlying lock; exiting releases it. Fixes the
    previous version, whose ``__init__`` read an undefined name instead of
    its parameter and whose ``__exit__`` declared three parameters with the
    same name (a SyntaxError).
    """

    def __init__(self, A_):
        # The lock object this proxy guards.
        self.lock = A_
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
# Base class for platform file locks: nested (re-entrant) acquire/release
# with a counter, timeout-based polling, and context-manager support.
# NOTE(review): identifiers look machine-mangled — assignments bind
# `UpperCamelCase` while later lines read `lock_file`, `timeout`,
# `lock_id`, `force`, etc.; verify against the upstream filelock source.
class A__ :
def __init__( self , A_ , A_=-1 , A_=None ):
'''Set up lock state: file path, default timeout and nesting counter.'''
UpperCamelCase : List[Any] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
UpperCamelCase : Dict = self.hash_filename_if_too_long(A_ , A_ )
# The path to the lock file.
UpperCamelCase : List[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
UpperCamelCase : Tuple = None
# The default timeout value.
UpperCamelCase : Optional[Any] = timeout
# We use this lock primarily for the lock counter.
UpperCamelCase : Union[str, Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
UpperCamelCase : Dict = 0
return None
@property
def __UpperCamelCase( self ):
'''Path of the lock file.'''
return self._lock_file
@property
def __UpperCamelCase( self ):
'''Default timeout in seconds (negative means wait forever).'''
return self._timeout
@timeout.setter
def __UpperCamelCase( self , A_ ):
'''Coerce the new timeout to float and store it.'''
UpperCamelCase : Dict = float(A_ )
return None
def __UpperCamelCase( self ):
'''Platform-specific acquire; implemented by subclasses.'''
raise NotImplementedError()
def __UpperCamelCase( self ):
'''Platform-specific release; implemented by subclasses.'''
raise NotImplementedError()
@property
def __UpperCamelCase( self ):
'''True while this object holds the OS-level lock.'''
return self._lock_file_fd is not None
def __UpperCamelCase( self , A_=None , A_=0.05 ):
'''Acquire the lock, polling until timeout; returns a context proxy.'''
if timeout is None:
UpperCamelCase : Optional[Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
UpperCamelCase : Dict = id(self )
UpperCamelCase : List[str] = self._lock_file
UpperCamelCase : int = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(A_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
UpperCamelCase : List[Any] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __UpperCamelCase( self , A_=False ):
'''Release one nesting level; force=True drops the OS lock unconditionally.'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
UpperCamelCase : List[Any] = id(self )
UpperCamelCase : Dict = self._lock_file
logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
UpperCamelCase : Dict = 0
logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self ):
'''Acquire on entering a with-block.'''
self.acquire()
return self
def __exit__( self , A_ , A_ , A_ ):
'''Release on leaving a with-block.'''
self.release()
return None
def __del__( self ):
'''Best-effort release when the object is garbage collected.'''
self.release(force=A_ )
return None
def __UpperCamelCase( self , A_ , A_ ):
'''Shorten over-long lock filenames by hashing the tail.'''
UpperCamelCase : Tuple = os.path.basename(A_ )
if len(A_ ) > max_length and max_length > 0:
UpperCamelCase : Optional[int] = os.path.dirname(A_ )
UpperCamelCase : int = str(hash(A_ ) )
UpperCamelCase : Any = filename[: max_length - len(A_ ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(A_ , A_ )
else:
return path
# Windows implementation: locks one byte of the file via msvcrt.locking.
class A__ ( __snake_case ):
def __init__( self , A_ , A_=-1 , A_=None ):
'''Prefix the path with \\\\?\\ so long Windows paths are accepted.'''
from .file_utils import relative_to_absolute_path
super().__init__(A_ , timeout=A_ , max_filename_length=A_ )
UpperCamelCase : List[Any] = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def __UpperCamelCase( self ):
'''Open the lock file and try a non-blocking one-byte lock.'''
UpperCamelCase : Optional[int] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
UpperCamelCase : str = os.open(self._lock_file , A_ )
except OSError:
pass
else:
try:
msvcrt.locking(A_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(A_ )
else:
UpperCamelCase : Optional[Any] = fd
return None
def __UpperCamelCase( self ):
'''Unlock, close and best-effort remove the lock file.'''
UpperCamelCase : List[Any] = self._lock_file_fd
UpperCamelCase : str = None
msvcrt.locking(A_ , msvcrt.LK_UNLCK , 1 )
os.close(A_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
# Unix implementation: uses fcntl.flock with LOCK_EX | LOCK_NB.
class A__ ( __snake_case ):
def __init__( self , A_ , A_=-1 , A_=None ):
'''Cap the lock filename length at the filesystem's f_namemax.'''
UpperCamelCase : Tuple = os.statvfs(os.path.dirname(A_ ) ).f_namemax
super().__init__(A_ , timeout=A_ , max_filename_length=A_ )
def __UpperCamelCase( self ):
'''Open the lock file and try a non-blocking exclusive flock.'''
UpperCamelCase : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC
UpperCamelCase : int = os.open(self._lock_file , A_ )
try:
fcntl.flock(A_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(A_ )
else:
UpperCamelCase : List[str] = fd
return None
def __UpperCamelCase( self ):
'''Unlock the descriptor and close it.'''
UpperCamelCase : str = self._lock_file_fd
UpperCamelCase : List[Any] = None
fcntl.flock(A_ , fcntl.LOCK_UN )
os.close(A_ )
return None
# Portable fallback: the mere existence of the file (O_EXCL) is the lock.
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''Atomically create the lock file; failure means someone holds it.'''
UpperCamelCase : Dict = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
UpperCamelCase : Optional[int] = os.open(self._lock_file , A_ )
except OSError:
pass
else:
UpperCamelCase : Tuple = fd
return None
def __UpperCamelCase( self ):
'''Close the descriptor and best-effort remove the lock file.'''
os.close(self._lock_file_fd )
UpperCamelCase : str = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
__lowerCamelCase : Dict = None
# Pick the platform-appropriate FileLock implementation; warn when only
# the soft (existence-based) lock is available.
if msvcrt:
__lowerCamelCase : Any = WindowsFileLock
elif fcntl:
__lowerCamelCase : Any = UnixFileLock
else:
__lowerCamelCase : int = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 38
| 0
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class A__ :
# NOTE(review): every keyword parameter is declared as `A_` (duplicate
# names — a SyntaxError) and the assignments read names the signature
# never binds; verify against the upstream model tester.
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=50 , A_=0.02 , A_=True , A_=None , ):
'''Store the dummy model/config dimensions used by the tests.'''
UpperCamelCase : Optional[Any] = parent
UpperCamelCase : List[str] = batch_size
UpperCamelCase : Optional[int] = seq_length
UpperCamelCase : Union[str, Any] = is_training
UpperCamelCase : List[Any] = use_input_mask
UpperCamelCase : str = vocab_size
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : Union[str, Any] = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : List[str] = intermediate_size
UpperCamelCase : int = hidden_act
UpperCamelCase : Union[str, Any] = hidden_dropout_prob
UpperCamelCase : List[str] = attention_probs_dropout_prob
UpperCamelCase : Tuple = max_position_embeddings
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Union[str, Any] = use_labels
UpperCamelCase : str = scope
def __UpperCamelCase( self ):
'''Build random input ids, optional attention mask and labels plus a config.'''
UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : List[str] = None
if self.use_input_mask:
UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def __UpperCamelCase( self ):
'''Create a BertGenerationConfig from the stored dummy dimensions.'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=A_ , initializer_range=self.initializer_range , )
def __UpperCamelCase( self ):
'''Extend the base inputs with encoder hidden states / mask for decoder tests.'''
(
UpperCamelCase
) : Dict = self.prepare_config_and_inputs()
UpperCamelCase : Optional[Any] = True
UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , **A_ , ):
'''Run the encoder with and without a mask; check the output shape.'''
UpperCamelCase : Optional[Any] = BertGenerationEncoder(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : List[str] = model(A_ , attention_mask=A_ )
UpperCamelCase : int = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , ):
        '''Run the encoder in decoder mode, with and without an encoder attention mask.'''
        # NOTE(review): duplicated ``A_`` parameters — SyntaxError as written.
        UpperCamelCase : str = True
        UpperCamelCase : Tuple = BertGenerationEncoder(config=A_ )
        model.to(A_ )
        model.eval()
        # Forward once with an explicit encoder_attention_mask ...
        UpperCamelCase : Dict = model(
            A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
        # ... and once without, relying on the default (all-ones) mask.
        UpperCamelCase : Optional[int] = model(
            A_ , attention_mask=A_ , encoder_hidden_states=A_ , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , ):
        '''Check that decoding with cached past_key_values matches a full forward pass.'''
        # NOTE(review): duplicated ``A_`` parameters — SyntaxError as written;
        # the body's ``outputs``/``input_ids``/... names are likewise unbound.
        UpperCamelCase : Union[str, Any] = True
        UpperCamelCase : Tuple = True
        UpperCamelCase : Optional[int] = BertGenerationDecoder(config=A_ ).to(A_ ).eval()
        # first forward pass
        UpperCamelCase : str = model(
            A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , )
        UpperCamelCase : Dict = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        UpperCamelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCamelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        UpperCamelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCamelCase : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
        # Full-sequence forward (no cache) vs. incremental forward (with cache).
        UpperCamelCase : List[str] = model(
            A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )["hidden_states"][0]
        UpperCamelCase : Union[str, Any] = model(
            A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )["hidden_states"][0]
        # select random slice
        UpperCamelCase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCamelCase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , *A_ , ):
        '''Check BertGenerationDecoder LM-head logits shape when labels are supplied.'''
        # NOTE(review): duplicated ``A_`` parameters — SyntaxError as written.
        UpperCamelCase : Any = BertGenerationDecoder(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Union[str, Any] = model(A_ , attention_mask=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCamelCase( self ):
        '''Return (config, inputs_dict) as expected by the shared model tests.'''
        # NOTE(review): the dict reads ``input_ids``/``input_mask`` and the
        # return reads ``config``/``inputs_dict`` — none are bound here.
        UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        UpperCamelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
    '''Common + generation + pipeline test suite for BertGeneration models.

    NOTE(review): this file has been mechanically scrambled — most locals are
    bound to ``UpperCamelCase`` and call sites reference the unbound name
    ``A_``; verify each test body against the unscrambled original.
    '''

    # Model classes exercised by the common tests / generation tests / pipelines.
    _UpperCAmelCase :str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    _UpperCAmelCase :Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
    _UpperCAmelCase :List[str] = (
        {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def __UpperCamelCase( self ):
        '''Set up the model tester and the config tester.'''
        UpperCamelCase : str = BertGenerationEncoderTester(self )
        UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=A_ , hidden_size=37 )
    def __UpperCamelCase( self ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def __UpperCamelCase( self ):
        '''Basic encoder forward-shape test.'''
        UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )
    def __UpperCamelCase( self ):
        '''Same model test with a "bert" model_type override.'''
        UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        UpperCamelCase : Dict = "bert"
        self.model_tester.create_and_check_model(A_ , A_ , A_ , A_ )
    def __UpperCamelCase( self ):
        '''Encoder used as a decoder with cross-attention.'''
        UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*A_ )
    def __UpperCamelCase( self ):
        '''Cached-decoding equivalence test.'''
        UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*A_ )
    def __UpperCamelCase( self ):
        '''Decoder test with the encoder attention mask dropped (set to None).'''
        (
            UpperCamelCase
        ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
        UpperCamelCase : List[str] = None
        self.model_tester.create_and_check_model_as_decoder(
            A_ , A_ , A_ , A_ , A_ , A_ , )
    def __UpperCamelCase( self ):
        '''Causal-LM head test.'''
        UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*A_ )
    @slow
    def __UpperCamelCase( self ):
        '''Smoke test: load the pretrained checkpoint from the Hub.'''
        UpperCamelCase : List[Any] = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        self.assertIsNotNone(A_ )
@require_torch
class A__ ( unittest.TestCase ):
    '''Integration test: encoder hidden states against golden values.'''

    @slow
    def __UpperCamelCase( self ):
        '''Compare the first 3x3 hidden-state slice with recorded values.'''
        # NOTE(review): ``model(A_)``/``output``/``expected_shape`` names are
        # unbound in this scrambled body; intent is input -> output comparison.
        UpperCamelCase : Optional[int] = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        UpperCamelCase : Tuple = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            UpperCamelCase : Union[str, Any] = model(A_ )[0]
        UpperCamelCase : List[str] = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , A_ )
        UpperCamelCase : int = torch.tensor(
            [[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase ):
    '''Integration test: decoder LM logits against golden values.'''

    @slow
    def __UpperCamelCase( self ):
        '''Compare the first 3x3 logits slice with recorded values.'''
        # NOTE(review): same scrambling caveat as the encoder integration test.
        UpperCamelCase : List[str] = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        UpperCamelCase : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            UpperCamelCase : int = model(A_ )[0]
        UpperCamelCase : Optional[int] = torch.Size([1, 8, 5_0358] )
        self.assertEqual(output.shape , A_ )
        UpperCamelCase : List[Any] = torch.tensor(
            [[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4 ) )
| 706
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A_ ( model_type , generator_name_or_path , question_encoder_name_or_path , dest_dir , config_name_or_path = None , generator_tokenizer_name_or_path = None , question_encoder_tokenizer_name_or_path = None , ) -> None:
    """Assemble a RAG checkpoint from a generator and a question-encoder model.

    Fixes vs. the scrambled original: all seven parameters were named
    ``_lowerCAmelCase`` (a SyntaxError) while the body already read the real
    names restored here; ``__main__`` calls the function as ``consolidate``,
    which was never bound.

    Args:
        model_type: "rag_token" or "rag_sequence" — selects the RAG class.
        generator_name_or_path: generator model identifier.
        question_encoder_name_or_path: question-encoder model identifier.
        dest_dir: output directory (a ``pathlib.Path``).
        config_name_or_path: base RAG config; defaults per ``model_type``.
        generator_tokenizer_name_or_path: defaults to the generator model.
        question_encoder_tokenizer_name_or_path: defaults to the encoder model.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model: graft the two sub-model configs onto the RAG config.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check: the saved checkpoint must load back.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )


# The ``__main__`` block in this file invokes the function as ``consolidate``.
consolidate = A_
if __name__ == "__main__":
    # CLI: consolidate a generator + question-encoder into one RAG checkpoint.
    __lowerCamelCase : Tuple = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""",
        choices=["""rag_sequence""", """rag_token"""],
        required=True,
        type=str,
        help="""RAG model type: rag_sequence, rag_token""",
    )
    parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
    parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
    parser.add_argument(
        """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
    )
    parser.add_argument(
        """--generator_tokenizer_name_or_path""",
        type=str,
        help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
    )
    parser.add_argument(
        """--question_encoder_tokenizer_name_or_path""",
        type=str,
        help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
    )
    parser.add_argument(
        """--config_name_or_path""",
        type=str,
        help=(
            """Identifier of the model config to use, if not provided, resolves to a base config for a given"""
            """ ``model_type``"""
        ),
    )
    # NOTE(review): the parser and parsed args are bound to ``__lowerCamelCase``
    # while ``parser``/``args``/``dest_dir`` are read below — scrambler damage;
    # the intent is parser -> args -> dest_dir -> consolidate(...).
    __lowerCamelCase : Dict = parser.parse_args()
    __lowerCamelCase : Dict = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 38
| 0
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected" , [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
        # Fixed: the scrambled source used the undefined name ``_lowerCAmelCase``
        # as the range start; the intent is one single-element range per shard.
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i , i + 1 ) for i in range(10 )]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
    ] , )
def A_ ( kwargs , expected ):
    """Check that _distribute_shards splits shard indices into the expected ranges.

    Fixes vs. the scrambled original: both parameters were named
    ``_lowerCAmelCase`` (a SyntaxError) even though the parametrize string
    names them ``kwargs, expected``; the assert read an unbound ``out``; the
    ``-> Any`` annotation referenced an unimported name.
    """
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected" , [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ] , )
def A_ ( gen_kwargs , max_num_jobs , expected ):
    """Check that _split_gen_kwargs partitions shard lists across jobs.

    Fixes vs. the scrambled original: the three parameters were all named
    ``_lowerCAmelCase`` (a SyntaxError); the assert read an unbound ``out``;
    the ``-> Optional[Any]`` annotation referenced unimported names.
    """
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected" , [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Mismatched shard-list lengths must raise.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ] , )
def A_ ( gen_kwargs , expected ):
    """Check _number_of_shards_in_gen_kwargs, including the error case.

    Fixes vs. the scrambled original: both parameters were named
    ``_lowerCAmelCase`` (a SyntaxError); the error branch passed the undefined
    ``_lowerCAmelCase`` to ``pytest.raises`` instead of ``expected``; the
    success branch asserted on an unbound ``out``.
    """
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 707
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
    '''Model tester that builds TFConvBert configs/inputs and shape-checks each head.

    NOTE(review): this file has been mechanically scrambled — locals are bound
    to ``UpperCamelCase`` while later statements read the original names, and
    ``__init__`` repeats the parameter name ``A_`` (a SyntaxError as written).
    Verify each body against the unscrambled tester.
    '''

    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
        '''Store the (hard-coded) hyper-parameters used by all sub-tests.'''
        # All incoming arguments are ignored; the values below are fixed.
        UpperCamelCase : Dict = parent
        UpperCamelCase : str = 13
        UpperCamelCase : int = 7
        UpperCamelCase : str = True
        UpperCamelCase : Dict = True
        UpperCamelCase : str = True
        UpperCamelCase : Tuple = True
        UpperCamelCase : List[str] = 99
        UpperCamelCase : Optional[Any] = 384
        UpperCamelCase : Tuple = 2
        UpperCamelCase : Union[str, Any] = 4
        UpperCamelCase : Dict = 37
        UpperCamelCase : Any = "gelu"
        UpperCamelCase : List[Any] = 0.1
        UpperCamelCase : int = 0.1
        UpperCamelCase : Tuple = 512
        UpperCamelCase : List[Any] = 16
        UpperCamelCase : int = 2
        UpperCamelCase : Dict = 0.02
        UpperCamelCase : Optional[Any] = 3
        UpperCamelCase : List[Any] = 4
        UpperCamelCase : Dict = 128
        UpperCamelCase : Optional[Any] = 2
        UpperCamelCase : Optional[int] = 9
        UpperCamelCase : Optional[int] = 1
        UpperCamelCase : Union[str, Any] = None
    def __UpperCamelCase( self ):
        '''Build a ConvBertConfig plus input tensors and optional labels.'''
        UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : str = None
        if self.use_input_mask:
            UpperCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : Tuple = None
        if self.use_token_type_ids:
            UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : Optional[int] = None
        UpperCamelCase : List[Any] = None
        if self.use_labels:
            UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase : Any = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Shape-check the bare TFConvBertModel (dict and list input forms).'''
        UpperCamelCase : str = TFConvBertModel(config=A_ )
        UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        UpperCamelCase : Optional[int] = [input_ids, input_mask]
        UpperCamelCase : Any = model(A_ )
        UpperCamelCase : int = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Shape-check the masked-LM head.'''
        UpperCamelCase : Tuple = TFConvBertForMaskedLM(config=A_ )
        UpperCamelCase : int = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Dict = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Shape-check the sequence-classification head.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : int = TFConvBertForSequenceClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Shape-check the multiple-choice head (inputs tiled per choice).'''
        UpperCamelCase : List[str] = self.num_choices
        UpperCamelCase : str = TFConvBertForMultipleChoice(config=A_ )
        UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Dict = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : Any = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
        UpperCamelCase : List[str] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        UpperCamelCase : Optional[Any] = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Shape-check the token-classification head.'''
        UpperCamelCase : Dict = self.num_labels
        UpperCamelCase : str = TFConvBertForTokenClassification(config=A_ )
        UpperCamelCase : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : str = model(A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
        '''Shape-check start/end logits of the question-answering head.'''
        UpperCamelCase : List[str] = TFConvBertForQuestionAnswering(config=A_ )
        UpperCamelCase : Union[str, Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCamelCase : Union[str, Any] = model(A_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __UpperCamelCase( self ):
        '''Repackage prepare_config_and_inputs() into (config, inputs_dict).'''
        UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) : Optional[Any] = config_and_inputs
        UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
    '''Common + pipeline test suite for the TFConvBert model family.

    NOTE(review): scrambled file — locals are bound to ``UpperCamelCase`` and
    many call sites read the unbound name ``A_``; verify against the original.
    '''

    # All TF ConvBert model classes exercised by the shared tests.
    _UpperCAmelCase :Dict = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    _UpperCAmelCase :Optional[Any] = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _UpperCAmelCase :Any = False
    _UpperCAmelCase :int = False
    _UpperCAmelCase :str = False
    def __UpperCamelCase( self ):
        '''Set up the model tester and the config tester.'''
        UpperCamelCase : Dict = TFConvBertModelTester(self )
        UpperCamelCase : Dict = ConfigTester(self , config_class=A_ , hidden_size=37 )
    def __UpperCamelCase( self ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def __UpperCamelCase( self ):
        '''Bare-model shape test.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )
    def __UpperCamelCase( self ):
        '''Masked-LM head test.'''
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A_ )
    def __UpperCamelCase( self ):
        '''Multiple-choice head test.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A_ )
    def __UpperCamelCase( self ):
        '''Question-answering head test.'''
        UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A_ )
    def __UpperCamelCase( self ):
        '''Sequence-classification head test.'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A_ )
    def __UpperCamelCase( self ):
        '''Token-classification head test.'''
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A_ )
    @slow
    def __UpperCamelCase( self ):
        '''SavedModel round-trip: export, reload, and re-check hidden states/attentions.'''
        UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Optional[Any] = True
        UpperCamelCase : Any = True
        if hasattr(A_ , "use_cache" ):
            UpperCamelCase : List[str] = True
        UpperCamelCase : List[Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Any = getattr(self.model_tester , "key_length" , A_ )
        for model_class in self.all_model_classes:
            UpperCamelCase : List[Any] = self._prepare_for_class(A_ , A_ )
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Optional[int] = len(model(A_ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(A_ , saved_model=A_ )
                # Reload via the Keras SavedModel path, not from_pretrained.
                UpperCamelCase : Union[str, Any] = os.path.join(A_ , "saved_model" , "1" )
                UpperCamelCase : Dict = tf.keras.models.load_model(A_ )
                UpperCamelCase : str = model(A_ )
                if self.is_encoder_decoder:
                    UpperCamelCase : Union[str, Any] = outputs["encoder_hidden_states"]
                    UpperCamelCase : Any = outputs["encoder_attentions"]
                else:
                    UpperCamelCase : Any = outputs["hidden_states"]
                    UpperCamelCase : List[str] = outputs["attentions"]
                self.assertEqual(len(A_ ) , A_ )
                UpperCamelCase : int = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(A_ ) , A_ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
                # ConvBert halves the attention heads via its head-ratio trick.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def __UpperCamelCase( self ):
        '''Smoke test: load the pretrained checkpoint from the Hub.'''
        UpperCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(A_ )
    def __UpperCamelCase( self ):
        '''Attention-output plumbing: config flags vs. returned attentions.'''
        UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase : Dict = True
        UpperCamelCase : int = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        UpperCamelCase : Optional[int] = getattr(self.model_tester , "key_length" , A_ )
        UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , A_ )
        def check_decoder_attentions_output(A_ ):
            # Decoder attentions: one tensor per layer, heads halved by ConvBert.
            UpperCamelCase : Optional[Any] = len(A_ )
            self.assertEqual(out_len % 2 , 0 )
            UpperCamelCase : Any = outputs.decoder_attentions
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(A_ ):
            # Encoder (or plain) attentions with the same halved-head layout.
            UpperCamelCase : Dict = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            UpperCamelCase : Union[str, Any] = True
            UpperCamelCase : List[Any] = False
            UpperCamelCase : Dict = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            UpperCamelCase : List[str] = len(A_ )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            if self.is_encoder_decoder:
                UpperCamelCase : int = model_class(A_ )
                UpperCamelCase : Tuple = model(self._prepare_for_class(A_ , A_ ) )
                self.assertEqual(config.output_hidden_states , A_ )
                check_decoder_attentions_output(A_ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            UpperCamelCase : Tuple = True
            UpperCamelCase : int = model_class(A_ )
            UpperCamelCase : Dict = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
            # Check attention is always last and order is fine
            UpperCamelCase : Optional[int] = True
            UpperCamelCase : List[str] = True
            UpperCamelCase : Optional[int] = model_class(A_ )
            UpperCamelCase : Optional[Any] = model(self._prepare_for_class(A_ , A_ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) )
            self.assertEqual(model.config.output_hidden_states , A_ )
            check_encoder_attentions_output(A_ )
@require_tf
class A__ ( unittest.TestCase ):
    '''Integration test: TFConvBert hidden states against golden values.'''

    @slow
    def __UpperCamelCase( self ):
        '''Compare the first 3x3 hidden-state slice with recorded values.'''
        # NOTE(review): ``model(A_)``/``output`` names are unbound in this
        # scrambled body; the intent is input_ids -> output comparison.
        UpperCamelCase : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
        UpperCamelCase : List[str] = model(A_ )[0]
        UpperCamelCase : int = [1, 6, 768]
        self.assertEqual(output.shape , A_ )
        UpperCamelCase : List[str] = tf.constant(
            [
                [
                    [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
                    [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
                    [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1e-4 )
| 38
| 0
|
# Doomsday-algorithm lookup tables. The weekday function below reads these
# under the names ``DOOMSDAY_LEAP`` / ``DOOMSDAY_NOT_LEAP`` / ``WEEK_DAY_NAMES``;
# in the scrambled source all three literals were bound to ``__lowerCamelCase``
# (each shadowing the previous one), leaving the real names undefined.
# Each month entry is that month's doomsday date modulo 7 (e.g. Jan 4 in a
# leap year, Jan 3 otherwise).
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# Weekday index -> name, with Sunday = 0.
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def A_ ( year , month , day ) -> str:
    """Return the weekday name of a Gregorian date via Conway's Doomsday rule.

    Fixes vs. the scrambled original: the three parameters were all named
    ``_lowerCAmelCase`` (a SyntaxError) while the body read year/month/day;
    the leap-year test used ``(year % 400) == 0`` where the rule requires
    ``!= 0`` (century years divisible by 400, e.g. 2000, ARE leap years);
    the lookup tables were bound to mangled module names, so they are kept
    local here to make the function self-contained.

    >>> A_(2020, 10, 24)
    'Saturday'
    """
    # Doomsday dates per month, modulo 7, for leap / common years.
    doomsday_leap = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    doomsday_not_leap = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    week_day_names = {
        0: "Sunday",
        1: "Monday",
        2: "Tuesday",
        3: "Wednesday",
        4: "Thursday",
        5: "Friday",
        6: "Saturday",
    }
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm: anchor weekday for the century ...
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    # ... then the year's doomsday weekday within that century.
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Pick the month's doomsday date; leap when divisible by 4, except
    # century years not divisible by 400 (fixed: original tested ``== 0``).
    day_anchor = (
        doomsday_not_leap[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else doomsday_leap[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return week_day_names[week_day]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 708
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and pretrained-config URL map for CamemBERT variants.
# NOTE(review): both values are bound to the same mangled name
# ``__lowerCamelCase`` (the map shadows the logger); in the original these
# were ``logger`` and ``CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP``.
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : str = {
    """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
    """umberto-commoncrawl-cased-v1""": (
        """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
    ),
    """umberto-wikipedia-uncased-v1""": (
        """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
    ),
}
class A__ ( __snake_case ):
    '''Configuration class for CamemBERT models (model_type "camembert").

    NOTE(review): ``__init__`` repeats the parameter name ``A_`` for every
    argument — a SyntaxError as written — and the body reads the original
    parameter names (vocab_size, hidden_size, ...), which are unbound here.
    '''

    _UpperCAmelCase :Union[str, Any] = 'camembert'
    def __init__( self , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ):
        '''Store the transformer hyper-parameters on the config instance.'''
        super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
        UpperCamelCase : List[str] = vocab_size
        UpperCamelCase : Union[str, Any] = hidden_size
        UpperCamelCase : Any = num_hidden_layers
        UpperCamelCase : Union[str, Any] = num_attention_heads
        UpperCamelCase : Dict = hidden_act
        UpperCamelCase : str = intermediate_size
        UpperCamelCase : str = hidden_dropout_prob
        UpperCamelCase : Dict = attention_probs_dropout_prob
        UpperCamelCase : Union[str, Any] = max_position_embeddings
        UpperCamelCase : Optional[Any] = type_vocab_size
        UpperCamelCase : int = initializer_range
        UpperCamelCase : List[str] = layer_norm_eps
        UpperCamelCase : Dict = position_embedding_type
        UpperCamelCase : int = use_cache
        UpperCamelCase : List[str] = classifier_dropout
class A__ ( __snake_case ):
    '''ONNX export configuration: declares the model's dynamic input axes.'''

    @property
    def __UpperCamelCase( self ):
        '''Return an ordered input-name -> dynamic-axes mapping for ONNX export.

        Fix vs. the scrambled original: the axis dictionaries were assigned to
        the throwaway name ``UpperCamelCase`` while the return statement read
        ``dynamic_axis``, producing a NameError at runtime; the binding is
        restored here.
        '''
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra per-choice axis.
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 38
| 0
|
from manim import *
class A__ ( __snake_case ):
    '''Manim scene animating checkpoint weights moving from CPU/GPU to disk.

    NOTE(review): scrambled file — locals are bound to ``UpperCamelCase``
    while layout calls read names like ``cpu``/``gpu``/``model`` and pass the
    unbound ``A_``; verify against the original accelerate animation script.
    '''

    def __UpperCamelCase( self ):
        '''Construct the scene: CPU/GPU/model blocks, checkpoint, key, and disk animation.'''
        # Base rectangles used as memory "cells".
        UpperCamelCase : Optional[int] = Rectangle(height=0.5 , width=0.5 )
        UpperCamelCase : str = Rectangle(height=0.25 , width=0.25 )
        UpperCamelCase : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of six cells with a label.
        UpperCamelCase : Optional[Any] = [mem.copy() for i in range(6 )]
        UpperCamelCase : List[str] = [mem.copy() for i in range(6 )]
        UpperCamelCase : int = VGroup(*A_ ).arrange(A_ , buff=0 )
        UpperCamelCase : Tuple = VGroup(*A_ ).arrange(A_ , buff=0 )
        UpperCamelCase : Union[str, Any] = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
        UpperCamelCase : Optional[int] = Text("CPU" , font_size=24 )
        UpperCamelCase : List[Any] = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(A_ )
        # GPU: a single row of four cells.
        UpperCamelCase : List[Any] = [mem.copy() for i in range(4 )]
        UpperCamelCase : List[Any] = VGroup(*A_ ).arrange(A_ , buff=0 )
        UpperCamelCase : Optional[Any] = Text("GPU" , font_size=24 )
        UpperCamelCase : List[str] = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
        gpu.move_to([-1, -1, 0] )
        self.add(A_ )
        # Model: six cells representing the empty model's weights.
        UpperCamelCase : Any = [mem.copy() for i in range(6 )]
        UpperCamelCase : Any = VGroup(*A_ ).arrange(A_ , buff=0 )
        UpperCamelCase : Optional[Any] = Text("Model" , font_size=24 )
        UpperCamelCase : List[Any] = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
        model.move_to([3, -1.0, 0] )
        self.add(A_ )
        # Overlay small filled targets on the CPU cells for each model weight.
        UpperCamelCase : Dict = []
        UpperCamelCase : int = []
        UpperCamelCase : Tuple = []
        for i, rect in enumerate(A_ ):
            rect.set_stroke(A_ )
            UpperCamelCase : Optional[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A_ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=A_ , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=A_ , buff=0.0 )
            self.add(A_ )
            model_cpu_arr.append(A_ )
        self.add(*A_ , *A_ , *A_ )
        # Loaded checkpoint block with its own six cells.
        UpperCamelCase : Any = [mem.copy() for i in range(6 )]
        UpperCamelCase : List[str] = VGroup(*A_ ).arrange(A_ , buff=0 )
        UpperCamelCase : Dict = Text("Loaded Checkpoint" , font_size=24 )
        UpperCamelCase : Union[str, Any] = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(A_ )
        # Map each checkpoint cell onto a CPU cell (left column, then right).
        UpperCamelCase : int = []
        UpperCamelCase : List[Any] = []
        for i, rect in enumerate(A_ ):
            UpperCamelCase : int = fill.copy().set_fill(A_ , opacity=0.7 )
            target.move_to(A_ )
            ckpt_arr.append(A_ )
            UpperCamelCase : Optional[Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(A_ )
        self.add(*A_ , *A_ )
        # Legend: empty-model vs. checkpoint colors.
        UpperCamelCase : Any = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCamelCase : Any = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(A_ , A_ )
        UpperCamelCase : List[str] = MarkupText(
            F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(A_ )
        # Caption for the disk-offload step.
        UpperCamelCase : Optional[int] = MarkupText(
            F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        # Disk: two columns of six "meta" cells.
        UpperCamelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
        UpperCamelCase : List[str] = [meta_mem.copy() for i in range(6 )]
        UpperCamelCase : str = VGroup(*A_ ).arrange(A_ , buff=0 )
        UpperCamelCase : Union[str, Any] = VGroup(*A_ ).arrange(A_ , buff=0 )
        UpperCamelCase : List[str] = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
        UpperCamelCase : str = Text("Disk" , font_size=24 )
        UpperCamelCase : int = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(A_ , run_time=3 ) , Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) )
        # Animate each checkpoint cell shrinking onto the disk column.
        UpperCamelCase : Union[str, Any] = []
        for i, rect in enumerate(A_ ):
            UpperCamelCase : List[Any] = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(A_ , run_time=1.5 ) )
        self.play(*A_ )
        self.play(FadeOut(A_ ) )
        # Final caption, then clear the scene.
        UpperCamelCase : Any = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(A_ , run_time=3 ) )
        self.play(
            FadeOut(A_ , A_ , *A_ , *A_ ) , )
        self.wait()
| 709
|
def nor_gate(input_a: int, input_b: int) -> int:
    """Return the NOR of two binary inputs: 1 only when both are 0.

    The mangled original declared two parameters with the same name (a
    SyntaxError) and read an undefined ``input_a``; the truth-table code
    below calls this function as ``nor_gate``, so that name is restored.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    # Chained comparison: true exactly when input_a == 0 and input_b == 0.
    return int(input_a == input_b == 0)
def main() -> None:
    """Print the truth table of the NOR gate.

    Restored the name ``main`` — the ``__main__`` guard below calls
    ``main()``, which the mangled def name did not provide.
    """
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
    # Run the module's doctest examples, then print the demo truth table.
    import doctest
    doctest.testmod()
    main()
| 38
| 0
|
'''simple docstring'''
from math import loga
def A_(number: int) -> int:
    """Return the zero-based index of the least-significant set bit.

    Returns 0 for an input of 0 (degenerate case kept from the original).

    The mangled original's body read an undefined name and called
    ``isinstance(value, value)``, which raises TypeError for every input;
    the type check is restored to reject non-ints. ``(n & -n).bit_length()``
    replaces the broken ``loga`` (log2) import: ``n & -n`` isolates the
    lowest set bit, and its bit length minus one is that bit's index —
    exact for arbitrarily large ints, no float rounding.

    >>> A_(0)
    0
    >>> A_(8)
    3

    Raises:
        ValueError: if ``number`` is negative.
        TypeError: if ``number`` is not an ``int``.
    """
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    return 0 if number == 0 else (number & -number).bit_length() - 1
if __name__ == "__main__":
    # Run the module's doctest examples.
    import doctest
    doctest.testmod()
| 710
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __snake_case ):
    """Processor wrapping a BLIP image processor and a tokenizer.

    Text-only input behaves like the tokenizer alone; image input goes
    through the image processor, with text features merged in when both
    are given.

    NOTE(review): restored from a mangled state — several results were
    assigned to throwaway locals and then read under the names used below
    (``text_encoding``, ``encoding_image_processor``, ...). Class-attribute
    and method names follow the ProcessorMixin protocol, grounded by the
    "Copied from ...BlipProcessor.model_input_names" marker kept below.
    """

    # ProcessorMixin protocol attributes (the mangled originals all shared
    # one name and shadowed each other).
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        """Store both sub-processors.

        NOTE(review): the original assigned ``False`` to a discarded local;
        upstream BLIP processors disable token_type_ids on the tokenizer,
        restored on that assumption — TODO confirm.
        """
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        # Processor used when a single modality is passed.
        self.current_processor = self.image_processor

    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_token_type_ids=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize ``text`` and/or featurize ``images``.

        Returns the tokenizer output for text-only calls, otherwise the
        image-processor output, updated with the text encoding when both
        modalities are present.

        Raises:
            ValueError: if neither ``images`` nor ``text`` is given.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            # NOTE(review): mirrored from the upstream BLIP processor —
            # the mangled original dropped this assignment's target.
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, de-duplicated."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 38
| 0
|
'''simple docstring'''
from __future__ import annotations
# Adjacency list of the undirected demo graph.  Restored the module-level
# name ``graph``: the mangled binding was never read, while the __main__
# driver at the bottom of this section constructs ``Graph(graph, "G")``.
graph = {
    """A""": ["""B""", """C""", """E"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F""", """G"""],
    """D""": ["""B"""],
    """E""": ["""A""", """B""", """D"""],
    """F""": ["""C"""],
    """G""": ["""C"""],
}
class Graph:
    """Unweighted graph supporting BFS and shortest-path reconstruction.

    Restored from a mangled state: ``__init__`` now stores its arguments on
    ``self`` (they were discarded locals), and the two methods carry the
    names the driver below calls — ``breath_first_search`` (spelling kept
    from the call sites) and ``shortest_path``.
    """

    def __init__(self, graph, source_vertex):
        """Store the adjacency mapping and the BFS start vertex."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        """Run BFS from the source vertex, filling ``self.parent``."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None  # the root has no parent
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex):
        """Return the path from the source as an ``A->B->C`` string.

        Raises:
            ValueError: if ``target_vertex`` is unreachable (run
                ``breath_first_search`` first).
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg)
        # Recurse up the BFS tree, then append this vertex.
        return self.shortest_path(target_vertex_parent) + f"""->{target_vertex}"""
if __name__ == "__main__":
    # Build the demo graph, run BFS once, then print example paths.
    # Restored the binding ``g`` (read on the following lines) and the
    # class/dict names defined above.
    g = Graph(graph, """G""")
    g.breath_first_search()
    print(g.shortest_path("""D"""))
    print(g.shortest_path("""G"""))
    # "Foo" is not in the graph, so this last call raises ValueError.
    print(g.shortest_path("""Foo"""))
| 711
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module logger; restored the name ``logger`` that the feature extractor's
# __call__ below uses for its sampling-rate warning (the mangled binding
# was never read).
logger = logging.get_logger(__name__)
class A__ ( __snake_case ):
    """Feature extractor turning raw mono audio into log-mel spectrogram
    "audio values" plus a patch-level attention mask.

    NOTE(review): restored from a mangled state — constructor arguments are
    now stored on ``self`` (they were discarded locals), locals match their
    later uses, ``np.floataa`` is fixed to the real float32/float64 dtypes,
    and the fbank helper carries the name ``__call__`` invokes.
    """

    # SequenceFeatureExtractor protocol attribute; name restored from the
    # mixin convention — TODO confirm against upstream.
    model_input_names = ['audio_values', 'audio_mask']

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        """Configure spectrogram geometry and build the mel filter bank.

        ``feature_size``/``sampling_rate``/``padding_value`` are also passed
        to the base class, which stores them as attributes.
        """
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        # Mel filter bank, transposed so ``spectrogram`` can consume it.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Return a dB-scaled log-mel spectrogram rescaled into [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]  # drop the trailing frame (kept from original)
        log_spec = log_spec - 20.0
        # Map the [-80, 0] dB range roughly into [-1, 1].
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms.

        Returns a ``BatchFeature`` with ``audio_values`` (padded log-mel
        spectrograms) and, when requested, ``audio_mask``.

        Raises:
            ValueError: on a sampling-rate mismatch or multi-channel input.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            # NOTE(review): slice-assignment target reconstructed — the
            # mangled line dropped it; confirm against upstream.
            padded_audio_features[i, 0, : feature.shape[0]] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 38
| 0
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
# Module logger; restored the name ``logger`` used by the weight-loading
# helpers below (the mangled binding was never read).
logger = logging.get_logger("""transformers.models.encodec""")
# Checkpoint-key renaming tables.  The real names are restored: the
# combined dicts below spread **MAPPING_QUANTIZER etc., and the loader
# selects MAPPING_24K / MAPPING_48K by model name — the mangled bindings
# left every one of those references undefined.
MAPPING_QUANTIZER = {
    """quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
    """quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
    """quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
    """quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
    """encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
    """encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
    """encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
    """encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
    """encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
    """encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
    """encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
    """encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
    """encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
    """encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
    """encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
    """encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
    """encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
    """encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
    """encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
    """encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
    """encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
    """encoder.model.13.lstm""": """encoder.layers.13.lstm""",
    """encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
# Extra encoder norm layers that only exist in the 48 kHz variant.
MAPPING_ENCODER_48K = {
    """encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
    """encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
    """encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
    """encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
    """encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
    """encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
    """encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
    """encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
    """encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
    """encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
    """encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
    """encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
    """encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
    """encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
    """encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
    """encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
    """encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
    """encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
    """decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
    """decoder.model.1.lstm""": """decoder.layers.1.lstm""",
    """decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
    """decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
    """decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
    """decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
    """decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
    """decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
    """decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
    """decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
    """decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
    """decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
    """decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
    """decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
    """decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
    """decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
    """decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
    """decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
    """decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
# Extra decoder norm layers that only exist in the 48 kHz variant.
MAPPING_DECODER_48K = {
    """decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
    """decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
    """decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
    """decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
    """decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
    """decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
    """decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
    """decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
    """decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
    """decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
    """decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
    """decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
    """decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
    """decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
    """decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
    """decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
    """decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
    """decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
# Names restored from the upstream conversion-script convention —
# nothing in view reads TOP_LEVEL_KEYS; confirm IGNORE_KEYS usage upstream.
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the parameter of ``hf_pointer`` addressed by the
    dot-path ``key`` and the attribute name ``weight_type``.

    Restored from a mangled state: the signature declared five identical
    parameter names (SyntaxError), the traversal result was discarded, and
    the name matches its caller in ``recursively_load_weights``.

    Args:
        hf_pointer: root HF module to descend into.
        key: dot-separated attribute path to the target sub-module.
        value: tensor whose shape must match the destination.
        full_name: original checkpoint key, used in messages.
        weight_type: parameter attribute (``weight``, ``bias``, LSTM
            ``weight_ih_l0``-style names, batch-norm stats, ...) or None
            when ``key`` itself addresses the parameter.

    Raises:
        ValueError: on a shape mismatch.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type is None:
        hf_pointer.data = value
    else:
        # Collapses the original 15-branch elif chain: every branch did
        # exactly `hf_pointer.<weight_type>.data = value`; callers only pass
        # the enumerated weight types, so getattr is equivalent.
        getattr(hf_pointer, weight_type).data = value
    logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def should_ignore(name, ignore_keys):
    """Return True if checkpoint entry ``name`` matches any ignore pattern.

    Pattern forms: ``foo.*`` matches any name starting with ``foo.``;
    ``a.*.b`` matches when both ``a`` and ``b`` occur in the name; any
    other key is a plain substring match.

    Fixed from the mangled original: the ``a.*.b`` branch now unpacks the
    split into ``prefix, suffix`` (the originals were read undefined), and
    the def carries the name its caller uses.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every tensor of the original EnCodec checkpoint onto ``hf_model``.

    Restored from a mangled state: local bindings (``MAPPING``,
    ``is_used``, ``unused_weights``, ``weight_type``...) now match their
    use sites, and the def carries the name ``convert_checkpoint`` calls.

    Args:
        orig_dict: state dict of the original checkpoint.
        hf_model: target ``EncodecModel``.
        model_name: one of encodec_24khz / encodec_32khz / encodec_48khz.

    Raises:
        ValueError: for an unknown ``model_name``.
    """
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        # Fixed: the original tested `x == "encodec_24khz" or "encodec_32khz"`,
        # which is always truthy; both variants share the 24 kHz mapping.
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F"""Unsupported model: {model_name}""" )
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F"""{name} was ignored""" )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    # Recover the layer index from the checkpoint key and
                    # substitute it into the HF-side pattern.
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                # Pick the parameter attribute by substring of the name.
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original EnCodec checkpoint to the HF format on disk.

    Restored from a mangled state: the signature had five identical
    parameter names, and every config override below was assigned to a
    discarded local instead of the config object (attribute names taken
    from upstream EncodecConfig — confirm against the library).

    Args:
        model_name: encodec_24khz / encodec_32khz / encodec_48khz.
        checkpoint_path: path to the original checkpoint file.
        pytorch_dump_folder_path: output directory for model + extractor.
        config_path: optional HF config.json to start from.
        repo_id: optional hub repo to push the converted artifacts to.

    Raises:
        ValueError: for an unknown ``model_name``.
    """
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F"""Unknown model name: {model_name}""" )
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point.  Restored the bindings ``parser`` / ``args`` — the
    # mangled assignments left both names undefined at their use sites.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model""",
        default="""encodec_24khz""",
        type=str,
        help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
    )
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 712
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
# Generic key/value type variables for the skip list.  Restored the names
# ``KT``/``VT`` — the class definitions below parameterize Generic[KT, VT],
# which the mangled bindings left undefined.
KT = TypeVar("""KT""")
VT = TypeVar("""VT""")
class Node(Generic[KT, VT]):
    """A single skip-list node: key, value and per-level forward links.

    Restored from a mangled state: attributes are stored on ``self`` (they
    were discarded locals), and the class/property names match their use
    sites in ``SkipList`` (``Node(...)``, ``node.level``).
    """

    def __init__(self, key="root", value=None):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i; its length is this
        # node's level.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self):
        return F"""Node({self.key}: {self.value})"""

    @property
    def level(self):
        """Number of levels this node participates in."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    """Probabilistic ordered key/value map (skip list).

    Restored from a mangled state: instance attributes are set on ``self``,
    ``_locate_node`` tuple results are unpacked, and method names match
    their call sites below (``random_level``, ``_locate_node``, ``delete``,
    ``insert``, ``find``).
    """

    def __init__(self, p=0.5, max_level=16):
        # Sentinel head node; never stores user data.
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0          # highest level currently in use
        self.p = p              # probability of promoting a node one level
        self.max_level = max_level

    def __str__(self):
        """ASCII diagram of the node chain at every level."""
        items = list(self)
        if len(items) == 0:
            return F"""SkipList(level={self.level})"""
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F"""[{node.key}]""".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                F"""[{node.key}]""".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards))
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return F"""SkipList(level={self.level})\n""" + "\n".join(lines)

    def __iter__(self):
        """Yield keys in ascending order by walking level 0."""
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self):
        """Draw a node level: 1 + geometric(p), capped at ``max_level``."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return ``(node_with_key_or_None, update_vector)``.

        ``update_vector`` holds, per level, the rightmost node strictly
        left of ``key`` — the nodes whose links an insert/delete touches.
        """
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key):
        """Remove ``key`` if present, relinking every affected level."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key, value):
        """Insert ``key``/``value``, overwriting the value of an existing key."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key):
        """Return the value stored under ``key``, or None when absent."""
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
return None
def test_insert():
    """Four distinct inserts store each key/value pair exactly once.

    (Def name restored from the test-runner's call; locals restored from
    their use sites.)
    """
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)
    # Walk the level-0 chain and collect every stored pair.
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    """Re-inserting an existing key overwrites its value in place."""
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    # Walk the level-0 chain and collect every stored pair.
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()  # debug leftover kept from the original
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    """find() on an empty list returns None rather than raising."""
    skip_list = SkipList()
    assert skip_list.find("Some key") is None
def test_search():
    """find() returns the latest value per key and None for absent keys."""
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20
    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)
    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    """delete() on an empty list is a silent no-op."""
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    """Keys removed with delete() are no longer returned by find()."""
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    """Deleting one key at a time never disturbs the remaining keys."""
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes():
    """After a delete, no forward link anywhere still reaches the dead node."""
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        """Yield this node's key, then recurse into every forward link."""
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    # head + 3 surviving keys == 4 distinct reachable keys.
    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    """Iteration stays sorted through inserts and deletes."""

    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def A_ ( ) -> Tuple:
    """Run the whole skip-list unit-test suite repeatedly.

    The skip list promotes nodes to random heights, so each test is
    repeated to surface height-dependent bugs.

    NOTE(review): the test names called below are not bound under these
    names in this file as written — presumably the pre-rename originals;
    confirm against the source module.
    """
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def A_ ( ) -> List[str]:
    """Small demo: build a skip list with a duplicate key and print it.

    Fix: the original printed an undefined mangled name (NameError);
    it now prints the skip list that was just built.
    """
    skip_list = SkipList()
    skip_list.insert(2 , "2" )
    skip_list.insert(4 , "4" )
    skip_list.insert(6 , "4" )
    skip_list.insert(4 , "5" )  # re-inserting key 4 overrides its value
    skip_list.insert(8 , "4" )
    skip_list.insert(9 , "4" )
    skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
    # Run doctests, then the interactive demo.
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not bound under that name in this file as
    # written — confirm against the original module.
    main()
| 38
| 0
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def A_ ( tf_checkpoint_path , albert_config_file , pytorch_dump_path ) -> Optional[Any]:
    """Convert a TensorFlow ALBERT checkpoint into a PyTorch checkpoint.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        albert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where the converted ``state_dict`` is written.

    Fix: the original declared the same mangled name for all three
    parameters (a SyntaxError) and its body read `config`, `model` and
    `pytorch_dump_path`, none of which were ever bound.
    """
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point: convert a TF ALBERT checkpoint to PyTorch.
    __lowerCamelCase : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--albert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained ALBERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    # NOTE(review): `parser`, `args` and `convert_tf_checkpoint_to_pytorch`
    # are read here but never bound under those names in this file as
    # written — the assignments target `__lowerCamelCase` and the function
    # above is named `A_`. Looks machine-mangled; code left unchanged.
    __lowerCamelCase : Dict = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 713
|
from PIL import Image
def A_ ( _lowerCAmelCase ) -> Image:
UpperCamelCase , UpperCamelCase : List[Any] = image.size
UpperCamelCase : Union[str, Any] = 0
UpperCamelCase : List[str] = image.load()
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
UpperCamelCase : List[Any] = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(_lowerCAmelCase ):
for i in range(_lowerCAmelCase ):
UpperCamelCase : Union[str, Any] = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
    # Threshold the grayscale ("L") image around its mean intensity.
    # NOTE(review): `mean_threshold` and `image` are not bound under those
    # names in this file as written — confirm against the original module.
    __lowerCamelCase : Union[str, Any] = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
    image.save("""output_image_path""")
| 38
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A__ ( __snake_case ):
    """Output container for the Flax ControlNet below: the per-resolution
    down-block residuals and the mid-block residual (see the keyword
    arguments of the constructor call at the end of the model's
    ``__call__``)."""
    # down_block_res_samples (presumably) — both fields carry the same
    # mangled name `_UpperCAmelCase`, so the second shadows the first;
    # TODO confirm against the original source.
    _UpperCAmelCase :jnp.ndarray
    # mid_block_res_sample (presumably).
    _UpperCAmelCase :jnp.ndarray
class A__ ( nn.Module ):
    """Conditioning-image embedder: a small conv stack mapping the raw
    conditioning image down to the UNet's first feature-map resolution.

    NOTE(review): field and parameter names are machine-mangled (repeated
    `_UpperCAmelCase`, `A_`) and the assignments target a bare
    `UpperCamelCase` rather than `self.*`/locals the bodies read — the
    class cannot run as written; documented without changing the code.
    """

    # output channel count of the final conv (presumably
    # conditioning_embedding_channels — TODO confirm).
    _UpperCAmelCase :int
    # channel widths of the intermediate conv blocks.
    _UpperCAmelCase :Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
    # computation dtype.
    _UpperCAmelCase :jnp.dtype = jnp.floataa

    def __UpperCamelCase( self ):
        """flax setup-style builder: conv-in, pairs of (same-resolution,
        stride-2 downsampling) convs, and a zero-initialised conv-out."""
        UpperCamelCase : List[str] = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        UpperCamelCase : Optional[int] = []
        for i in range(len(self.block_out_channels ) - 1 ):
            UpperCamelCase : List[Any] = self.block_out_channels[i]
            UpperCamelCase : Tuple = self.block_out_channels[i + 1]
            # 3x3 conv keeping resolution...
            UpperCamelCase : List[str] = nn.Conv(
                A_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(A_ )
            # ...followed by a strided 3x3 conv halving it.
            UpperCamelCase : List[str] = nn.Conv(
                A_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(A_ )
        UpperCamelCase : List[Any] = blocks
        # Zero-initialised so the ControlNet branch starts as a no-op.
        UpperCamelCase : Dict = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , A_ ):
        """conv_in -> silu -> conv stack (silu after each) -> conv_out."""
        UpperCamelCase : Dict = self.conv_in(A_ )
        UpperCamelCase : Any = nn.silu(A_ )
        for block in self.blocks:
            UpperCamelCase : int = block(A_ )
            UpperCamelCase : Any = nn.silu(A_ )
        UpperCamelCase : List[Any] = self.conv_out(A_ )
        return embedding
@flax_register_to_config
class A__ ( nn.Module , __snake_case , __snake_case ):
    """Flax ControlNet: a copy of the UNet encoder that additionally embeds
    a conditioning image and returns zero-conv residuals per resolution
    plus a mid-block residual, scaled by a conditioning scale.

    NOTE(review): names are machine-mangled throughout (every field is
    `_UpperCAmelCase`, parameters are `A_`, assignments target a bare
    `UpperCamelCase` rather than the names the bodies read), so the class
    cannot run as written; documented without altering the code.
    """

    # Field defaults — by position they appear to mirror diffusers'
    # FlaxControlNetModel: sample_size, in_channels, down_block_types,
    # only_cross_attention, block_out_channels, layers_per_block,
    # attention_head_dim, num_attention_heads, cross_attention_dim,
    # dropout, use_linear_projection, dtype, flip_sin_to_cos, freq_shift,
    # controlnet_conditioning_channel_order,
    # conditioning_embedding_out_channels — TODO confirm.
    _UpperCAmelCase :int = 3_2
    _UpperCAmelCase :int = 4
    _UpperCAmelCase :Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _UpperCAmelCase :Union[bool, Tuple[bool]] = False
    _UpperCAmelCase :Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    _UpperCAmelCase :int = 2
    _UpperCAmelCase :Union[int, Tuple[int]] = 8
    _UpperCAmelCase :Optional[Union[int, Tuple[int]]] = None
    _UpperCAmelCase :int = 1_2_8_0
    _UpperCAmelCase :float = 0.0
    _UpperCAmelCase :bool = False
    _UpperCAmelCase :jnp.dtype = jnp.floataa
    _UpperCAmelCase :bool = True
    _UpperCAmelCase :int = 0
    _UpperCAmelCase :str = "rgb"
    _UpperCAmelCase :Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)

    def __UpperCamelCase( self , A_ ):
        """Initialise parameters from dummy inputs (latent sample, timestep,
        encoder hidden states, conditioning image) and return `params`."""
        UpperCamelCase : Dict = (1, self.in_channels, self.sample_size, self.sample_size)
        UpperCamelCase : Any = jnp.zeros(A_ , dtype=jnp.floataa )
        UpperCamelCase : Union[str, Any] = jnp.ones((1,) , dtype=jnp.intaa )
        UpperCamelCase : Dict = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        # Conditioning image lives at 8x the latent resolution.
        UpperCamelCase : List[str] = (1, 3, self.sample_size * 8, self.sample_size * 8)
        UpperCamelCase : Optional[Any] = jnp.zeros(A_ , dtype=jnp.floataa )
        UpperCamelCase : Union[str, Any] = jax.random.split(A_ )
        UpperCamelCase : List[str] = {"params": params_rng, "dropout": dropout_rng}
        return self.init(A_ , A_ , A_ , A_ , A_ )["params"]

    def __UpperCamelCase( self ):
        """Build time projection/embedding, the conditioning embedder, the
        down-block stack with one zero-initialised 1x1 ControlNet conv per
        residual, and the cross-attention mid block."""
        UpperCamelCase : List[str] = self.block_out_channels
        UpperCamelCase : Optional[Any] = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        UpperCamelCase : Dict = self.num_attention_heads or self.attention_head_dim
        # input
        UpperCamelCase : int = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        UpperCamelCase : Optional[Any] = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        UpperCamelCase : int = FlaxTimestepEmbedding(A_ , dtype=self.dtype )
        UpperCamelCase : int = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        # Broadcast scalar config values across all down blocks.
        UpperCamelCase : Tuple = self.only_cross_attention
        if isinstance(A_ , A_ ):
            UpperCamelCase : Dict = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(A_ , A_ ):
            UpperCamelCase : Any = (num_attention_heads,) * len(self.down_block_types )
        # down
        UpperCamelCase : str = []
        UpperCamelCase : Optional[Any] = []
        UpperCamelCase : int = block_out_channels[0]
        # Every ControlNet conv is zero-initialised so training starts from
        # a no-op residual contribution.
        UpperCamelCase : str = nn.Conv(
            A_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(A_ )
        for i, down_block_type in enumerate(self.down_block_types ):
            UpperCamelCase : Union[str, Any] = output_channel
            UpperCamelCase : Optional[Any] = block_out_channels[i]
            UpperCamelCase : int = i == len(A_ ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                UpperCamelCase : List[Any] = FlaxCrossAttnDownBlockaD(
                    in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                UpperCamelCase : Any = FlaxDownBlockaD(
                    in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(A_ )
            # One zero conv per residual produced by this block...
            for _ in range(self.layers_per_block ):
                UpperCamelCase : List[Any] = nn.Conv(
                    A_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(A_ )
            # ...plus one for the downsampler of every non-final block.
            if not is_final_block:
                UpperCamelCase : int = nn.Conv(
                    A_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(A_ )
        UpperCamelCase : List[str] = down_blocks
        UpperCamelCase : List[str] = controlnet_down_blocks
        # mid
        UpperCamelCase : int = block_out_channels[-1]
        UpperCamelCase : Any = FlaxUNetMidBlockaDCrossAttn(
            in_channels=A_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        UpperCamelCase : Dict = nn.Conv(
            A_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , A_ , A_ , A_ , A_ , A_ = 1.0 , A_ = True , A_ = False , ):
        """Forward pass: embed time + conditioning image, run the down and
        mid blocks, pass each residual through its zero conv, and scale
        everything by the conditioning scale. Returns a tuple or a
        FlaxControlNetOutput depending on `return_dict`."""
        UpperCamelCase : Tuple = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Model trained on BGR conditioning images: flip the channels.
            UpperCamelCase : str = jnp.flip(A_ , axis=1 )
        # 1. time
        if not isinstance(A_ , jnp.ndarray ):
            UpperCamelCase : Tuple = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(A_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
            UpperCamelCase : Optional[Any] = timesteps.astype(dtype=jnp.floataa )
            UpperCamelCase : List[Any] = jnp.expand_dims(A_ , 0 )
        UpperCamelCase : int = self.time_proj(A_ )
        UpperCamelCase : List[Any] = self.time_embedding(A_ )
        # 2. pre-process (NCHW -> NHWC for flax convolutions)
        UpperCamelCase : int = jnp.transpose(A_ , (0, 2, 3, 1) )
        UpperCamelCase : Dict = self.conv_in(A_ )
        UpperCamelCase : List[Any] = jnp.transpose(A_ , (0, 2, 3, 1) )
        UpperCamelCase : Optional[int] = self.controlnet_cond_embedding(A_ )
        sample += controlnet_cond
        # 3. down
        UpperCamelCase : Any = (sample,)
        for down_block in self.down_blocks:
            if isinstance(A_ , A_ ):
                UpperCamelCase : Union[str, Any] = down_block(A_ , A_ , A_ , deterministic=not train )
            else:
                UpperCamelCase : Any = down_block(A_ , A_ , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        UpperCamelCase : Any = self.mid_block(A_ , A_ , A_ , deterministic=not train )
        # 5. contronet blocks
        UpperCamelCase : List[Any] = ()
        for down_block_res_sample, controlnet_block in zip(A_ , self.controlnet_down_blocks ):
            UpperCamelCase : Tuple = controlnet_block(A_ )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        UpperCamelCase : int = controlnet_down_block_res_samples
        UpperCamelCase : Optional[int] = self.controlnet_mid_block(A_ )
        # 6. scaling
        UpperCamelCase : str = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=A_ , mid_block_res_sample=A_ )
| 714
|
from math import loga
def A_ ( a ) -> int:
    """Count trailing zeros in the binary representation of ``a``.

    Equivalently: the 0-based index of the least significant set bit.
    By convention, returns 0 for ``a == 0``.

    Raises:
        ValueError: if ``a`` is negative.
        TypeError: if ``a`` is a float rather than an int.

    Fixes: the mangled original's body read ``a`` while the parameter had
    another name, and its type check was ``isinstance(a, a)``, which
    raises TypeError for every input. The float-log2 computation is
    replaced with exact integer arithmetic (``bit_length``), which gives
    identical results for all valid inputs.
    """
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(a , float ):
        raise TypeError("Input value must be a 'int' type" )
    # a & -a isolates the lowest set bit; bit_length() - 1 is its index.
    return 0 if (a == 0) else (a & -a).bit_length() - 1
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 38
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class A__ ( unittest.TestCase ):
    """Fixture holding the configuration used to exercise VivitImageProcessor.

    Fix: the original declared one mangled name for every constructor
    parameter (a SyntaxError) and never assigned ``self.*`` — the
    attributes read by the dict builder below were never bound.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ):
        # NOTE: mutable list defaults kept for interface compatibility;
        # they are only read, never mutated.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def __UpperCamelCase( self ):
        """Return the image-processor kwargs derived from this fixture."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class A__ ( __snake_case , unittest.TestCase ):
    """Tests VivitImageProcessor over PIL, numpy and torch video inputs.

    NOTE(review): many locals are read without being bound (assignments
    target a bare `UpperCamelCase`, call arguments are `A_`) — looks
    machine-mangled; documented here without modifying the code.
    """

    # Class under test; None when vision dependencies are unavailable.
    _UpperCAmelCase :List[str] = VivitImageProcessor if is_vision_available() else None

    def __UpperCamelCase( self ):
        '''Create the shared tester fixture.'''
        UpperCamelCase : List[Any] = VivitImageProcessingTester(self )

    @property
    def __UpperCamelCase( self ):
        '''Image-processor kwargs derived from the tester fixture.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def __UpperCamelCase( self ):
        '''The processor exposes every expected configuration attribute.'''
        UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A_ , "image_mean" ) )
        self.assertTrue(hasattr(A_ , "image_std" ) )
        self.assertTrue(hasattr(A_ , "do_normalize" ) )
        self.assertTrue(hasattr(A_ , "do_resize" ) )
        self.assertTrue(hasattr(A_ , "do_center_crop" ) )
        self.assertTrue(hasattr(A_ , "size" ) )

    def __UpperCamelCase( self ):
        '''`from_dict` honours explicit size/crop_size overrides.'''
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def __UpperCamelCase( self ):
        '''PIL videos encode to (batch, frames, channels, H, W) tensors.'''
        UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        UpperCamelCase : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : str = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __UpperCamelCase( self ):
        '''Same shape checks for numpy video inputs.'''
        UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , np.ndarray )
        # Test not batched input
        UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __UpperCamelCase( self ):
        '''Same shape checks for torch video inputs.'''
        UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 715
|
from __future__ import annotations
__lowerCamelCase : Optional[int] = """Muhammad Umer Farooq"""
__lowerCamelCase : Tuple = """MIT"""
__lowerCamelCase : Optional[int] = """1.0.0"""
__lowerCamelCase : int = """Muhammad Umer Farooq"""
__lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me"""
__lowerCamelCase : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class A__ ( HTMLParser ):
    """HTML parser that collects, for each anchor tag, the absolute URL
    built from its href relative to the configured domain.

    Fixes: the base class was an undefined mangled name (restored to
    HTMLParser, which is imported above and otherwise unused), the hook
    was not named ``handle_starttag`` (HTMLParser dispatches on that
    exact name, so no link was ever recorded), and the hook declared
    duplicate parameter names (a SyntaxError).
    """

    def __init__( self , domain ):
        super().__init__()
        # Absolute URLs discovered so far, in document order.
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag( self , tag , attrs ):
        """Record the absolute form of every non-empty, non-fragment href."""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    # NOTE(review): this compares the raw href against the
                    # stored absolute URLs, so dedup is best-effort only.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the registrable domain (last two labels) of the URL's host."""
    labels = get_sub_domain_name(_lowerCAmelCase ).split("." )
    return ".".join(labels[-2:] )
def A_ ( _lowerCAmelCase ) -> str:
    """Return the network-location (host[:port]) portion of a URL."""
    parsed = parse.urlparse(_lowerCAmelCase )
    return parsed.netloc
def A_ ( _lowerCAmelCase = "https://github.com" ) -> list[str]:
    """Crawl one page and return a sorted list of e-mail addresses found
    on the pages it links to.

    NOTE(review): several names are read without being bound under those
    names as written (`Parser`, `parser`, `r`, `domain`, `read`, `emails`,
    `valid_emails` — the assignments all target `UpperCamelCase`), and the
    `except ValueError` clauses would not catch `requests` errors — looks
    machine-mangled; documented without changing the code.
    """
    UpperCamelCase : int = get_domain_name(_lowerCAmelCase )
    # Initialize the parser
    UpperCamelCase : str = Parser(_lowerCAmelCase )
    try:
        # Open URL
        UpperCamelCase : int = requests.get(_lowerCAmelCase )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        UpperCamelCase : Optional[Any] = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                UpperCamelCase : Optional[Any] = requests.get(_lowerCAmelCase )
                # Get the valid email.
                UpperCamelCase : Optional[int] = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(_lowerCAmelCase )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(_lowerCAmelCase )
if __name__ == "__main__":
    # NOTE(review): `emails_from_url` and `emails` are not bound under
    # those names in this file as written — the crawler above is named
    # `A_` and the assignment targets `__lowerCamelCase`; confirm against
    # the original module.
    __lowerCamelCase : Tuple = emails_from_url("""https://github.com""")
    print(f"""{len(emails)} emails found:""")
    print("""\n""".join(sorted(emails)))
| 38
| 0
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__lowerCamelCase : Any = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__lowerCamelCase : Optional[int] = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__lowerCamelCase : Any = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    """Accuracy metric for the MATH dataset: canonicalizes LaTeX answers
    via ``math_equivalence.is_equiv`` before comparing prediction and
    reference strings."""

    def __UpperCamelCase( self ):
        '''Declare string prediction/reference features and metadata.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" ),
                    "references": datasets.Value("string" ),
                } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )

    def __UpperCamelCase( self , A_ , A_ ):
        '''Fraction of predictions judged LaTeX-equivalent to the reference.

        NOTE(review): `n_correct` and `accuracy` are read but the
        assignments target `UpperCamelCase` — looks machine-mangled;
        code unchanged.
        '''
        UpperCamelCase : List[Any] = 0.0
        for i, j in zip(A_ , A_ ):
            n_correct += 1.0 if math_equivalence.is_equiv(A_ , A_ ) else 0.0
        UpperCamelCase : Optional[Any] = n_correct / len(A_ )
        return {
            "accuracy": accuracy,
        }
| 716
|
from __future__ import annotations
def A_ ( limit ) -> list[int]:
    """Return all primes below ``limit`` (``limit >= 3``) using an
    odd-only sieve of Eratosthenes.

    Fixes restored from the mangled original: the collection loop's upper
    bound and the appended value both referenced the parameter instead of
    the loop index, so the function returned ``[2, limit, limit, ...]``.
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Strike out multiples of every odd candidate up to sqrt(limit).
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    # Collect the surviving odd numbers.
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def A_ ( ceiling = 100_0000 ) -> int:
    """Project Euler 50: the prime below ``ceiling`` writable as the sum
    of the most consecutive primes.

    Fixes restored from the mangled original: ``len``/``sum`` were applied
    to the integer parameter (TypeError) and ``length``/``largest`` were
    read before ever being bound.
    """
    primes = prime_sieve(ceiling )
    prime_set = set(primes )  # O(1) membership tests instead of O(n) list scans
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        # Only runs longer than the current best can improve the answer.
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound under that name in this file
    # as written — the function above is named `A_`; confirm.
    print(f"""{solution() = }""")
| 38
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class A__ ( __snake_case ):
    """Configuration class for XLM models (mirrors ``transformers.XLMConfig``).

    Fix: the original declared the same mangled name for all 33
    constructor parameters (a SyntaxError) and the body assignments
    targeted a bare name instead of ``self.*``, so no attribute was ever
    set. Names restored from the upstream XLMConfig signature, whose
    defaults match this one positionally.
    """

    _UpperCAmelCase :Dict = 'xlm'
    _UpperCAmelCase :Dict = {
        'hidden_size': 'emb_dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
        'n_words': 'vocab_size', # For backward compatibility
    }

    def __init__( self , vocab_size=3_0145 , emb_dim=2048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2048**-0.5 , layer_norm_eps=1e-12 , init_std=0.02 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        """Store all hyper-parameters and forward the special-token ids
        to the base class."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            # Legacy alias for vocab_size kept for backward compatibility.
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class A__ ( __snake_case ):
    """ONNX export configuration for XLM models: declares which axes of
    each input may vary at export time.

    Fix: the original assigned the axis mapping to a bare mangled name,
    leaving ``dynamic_axis`` (read in the return expression) unbound.
    """

    @property
    def __UpperCamelCase( self ):
        """Per-input dynamic axes; multiple-choice adds a choice axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 717
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __snake_case ):
    """Dataset input stream backed by a Python generator function
    (mirrors ``datasets``' GeneratorDatasetInputStream).

    Fix: the original declared one mangled name for every constructor
    parameter (a SyntaxError), never assigned ``self.builder``, and the
    read method returned names that were never bound.
    """

    def __init__( self , generator , features = None , cache_dir = None , keep_in_memory = False , streaming = False , gen_kwargs = None , num_proc = None , **kwargs , ):
        """Configure the base stream and the underlying Generator builder."""
        super().__init__(
            features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.builder = Generator(
            cache_dir=cache_dir , features=features , generator=generator , gen_kwargs=gen_kwargs , **kwargs , )

    def __UpperCamelCase( self ):
        """Build and return the dataset (streaming or fully prepared)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train" )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split="train" , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 38
| 0
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase : List[Any] = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def A_ ( h , w , scale_factor=8 ):
    """Round latent height/width up so they are multiples of ``scale_factor``.

    Returns ``(ceil(h / scale_factor**2) * scale_factor,
    ceil(w / scale_factor**2) * scale_factor)``.

    Fix: the original declared the same mangled name for all three
    parameters (a SyntaxError) while the body read ``h``, ``w`` and
    ``scale_factor``.
    """
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class A__ ( __snake_case ):
def __init__( self , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=A_ , tokenizer=A_ , unet=A_ , scheduler=A_ , movq=A_ , )
UpperCamelCase : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if latents is None:
UpperCamelCase : int = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCamelCase : Optional[Any] = latents.to(A_ )
UpperCamelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_=None , ):
'''simple docstring'''
UpperCamelCase : int = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase : Any = self.tokenizer(
A_ , padding="max_length" , truncation=A_ , max_length=77 , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors="pt" , )
UpperCamelCase : List[Any] = text_inputs.input_ids
UpperCamelCase : List[Any] = self.tokenizer(A_ , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A_ , A_ ):
UpperCamelCase : Union[str, Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCamelCase : str = text_input_ids.to(A_ )
UpperCamelCase : int = text_inputs.attention_mask.to(A_ )
UpperCamelCase : Optional[int] = self.text_encoder(
input_ids=A_ , attention_mask=A_ )
UpperCamelCase : Optional[Any] = prompt_embeds.repeat_interleave(A_ , dim=0 )
UpperCamelCase : Any = text_encoder_hidden_states.repeat_interleave(A_ , dim=0 )
UpperCamelCase : int = text_mask.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : Optional[Any] = [""] * batch_size
elif type(A_ ) is not type(A_ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(A_ )} !="""
F""" {type(A_ )}.""" )
elif isinstance(A_ , A_ ):
UpperCamelCase : Union[str, Any] = [negative_prompt]
elif batch_size != len(A_ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(A_ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
UpperCamelCase : Optional[int] = negative_prompt
UpperCamelCase : Tuple = self.tokenizer(
A_ , padding="max_length" , max_length=77 , truncation=A_ , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors="pt" , )
UpperCamelCase : Optional[Any] = uncond_input.input_ids.to(A_ )
UpperCamelCase : Tuple = uncond_input.attention_mask.to(A_ )
UpperCamelCase : Optional[int] = self.text_encoder(
input_ids=A_ , attention_mask=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : int = negative_prompt_embeds.shape[1]
UpperCamelCase : Any = negative_prompt_embeds.repeat(1 , A_ )
UpperCamelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ )
UpperCamelCase : List[Any] = uncond_text_encoder_hidden_states.shape[1]
UpperCamelCase : int = uncond_text_encoder_hidden_states.repeat(1 , A_ , 1 )
UpperCamelCase : Union[str, Any] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , A_ , -1 )
UpperCamelCase : Optional[int] = uncond_text_mask.repeat_interleave(A_ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : List[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCamelCase : Optional[int] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCamelCase : Tuple = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __UpperCamelCase( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCamelCase : Dict = torch.device(F"""cuda:{gpu_id}""" )
UpperCamelCase : Optional[int] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def __UpperCamelCase( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCamelCase : Union[str, Any] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCamelCase : Tuple = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCamelCase : Optional[int] = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
if self.safety_checker is not None:
UpperCamelCase : Any = cpu_offload_with_hook(self.safety_checker , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
UpperCamelCase : str = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCamelCase( self ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self , A_ , A_ , A_ , A_ = None , A_ = 512 , A_ = 512 , A_ = 100 , A_ = 4.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , ):
'''simple docstring'''
if isinstance(A_ , A_ ):
UpperCamelCase : Optional[Any] = 1
elif isinstance(A_ , A_ ):
UpperCamelCase : List[Any] = len(A_ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(A_ )}""" )
UpperCamelCase : str = self._execution_device
UpperCamelCase : Dict = batch_size * num_images_per_prompt
UpperCamelCase : Optional[int] = guidance_scale > 1.0
UpperCamelCase : Union[str, Any] = self._encode_prompt(
A_ , A_ , A_ , A_ , A_ )
if isinstance(A_ , A_ ):
UpperCamelCase : Any = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
UpperCamelCase : int = torch.cat(A_ , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase : int = image_embeds.repeat_interleave(A_ , dim=0 )
UpperCamelCase : Optional[Any] = negative_image_embeds.repeat_interleave(A_ , dim=0 )
UpperCamelCase : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
UpperCamelCase : Any = self.scheduler.timesteps
UpperCamelCase : Tuple = self.unet.config.in_channels
UpperCamelCase : Union[str, Any] = get_new_h_w(A_ , A_ , self.movq_scale_factor )
# create initial latent
UpperCamelCase : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : Dict = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
UpperCamelCase : int = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
UpperCamelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
UpperCamelCase : str = noise_pred.chunk(2 )
UpperCamelCase : Optional[int] = variance_pred.chunk(2 )
UpperCamelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Optional[int] = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , ).prev_sample
# post-processing
UpperCamelCase : Tuple = self.movq.decode(A_ , force_not_quantize=A_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCamelCase : Tuple = image * 0.5 + 0.5
UpperCamelCase : Union[str, Any] = image.clamp(0 , 1 )
UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase : Tuple = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
| 718
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A_ ( _lowerCAmelCase ) -> Union[str, Any]: # picklable for multiprocessing
return x.sum()
def A_ ( _lowerCAmelCase ) -> Optional[Any]: # picklable for multiprocessing
return i + 1
@dataclass
class A__ :
    """Simple two-field record used by the `asdict` tests below.

    Fix: the obfuscated original declared both fields under the same name
    (`_UpperCAmelCase`), so the second annotation silently replaced the
    first; restored the `x`/`y` pair that the tests construct via
    keyword arguments (`A(x=..., y=...)`).
    """

    # Integer payload of the record.
    x: int
    # String payload of the record.
    y: str
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[Any] = []
UpperCamelCase : List[Any] = 1
UpperCamelCase : Tuple = [1, 2]
UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
UpperCamelCase : Optional[Any] = {"a": [1, 2], "b": [3, 4]}
UpperCamelCase : Any = {"a": {"1": 1}, "b": 2}
UpperCamelCase : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4}
UpperCamelCase : Dict = {}
UpperCamelCase : Any = []
UpperCamelCase : Any = 2
UpperCamelCase : Any = [2, 3]
UpperCamelCase : Optional[Any] = {"a": 2, "b": 3}
UpperCamelCase : List[Any] = {"a": [2, 3], "b": [4, 5]}
UpperCamelCase : Tuple = {"a": {"1": 2}, "b": 3}
UpperCamelCase : Dict = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
UpperCamelCase : List[str] = 2
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
UpperCamelCase : List[str] = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
UpperCamelCase : int = {"a": 2, "b": 0, "c": 2}
UpperCamelCase : Union[str, Any] = {
"a": np.eye(2 ).astype(A_ ),
"b": np.zeros(3 ).astype(A_ ),
"c": np.ones(2 ).astype(A_ ),
}
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(A_ ): # can't pickle a local lambda
map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = {"a": 1, "b": 2}
UpperCamelCase : List[Any] = {"a": 3, "b": 4}
UpperCamelCase : Tuple = {"a": 5, "b": 6}
UpperCamelCase : Union[str, Any] = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
class A__ :
_UpperCAmelCase :str = 'bar'
UpperCamelCase : List[Any] = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(A_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def A_ ( iterable_length , num_proc , expected_num_proc ) -> Optional[Any]:
    """map_nested should go single-process below `parallel_min_length` (16)
    and cap the pool size at the iterable length otherwise.

    Fixes: the obfuscated signature repeated one parameter name three times
    (a SyntaxError) and did not match the parametrize argnames, and the
    lambda read the undefined name `x`.
    """
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            # First positional arg of Pool(...) is the worker count.
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A__ ( __snake_case ):
@require_tf
def __UpperCamelCase( self ):
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
UpperCamelCase : int = layers.Dense(2 )
def gen_random_output():
UpperCamelCase : Optional[Any] = tf.random.uniform((1, 3) )
return model(A_ ).numpy()
with temp_seed(42 , set_tensorflow=A_ ):
UpperCamelCase : List[Any] = gen_random_output()
with temp_seed(42 , set_tensorflow=A_ ):
UpperCamelCase : Dict = gen_random_output()
UpperCamelCase : Optional[int] = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
import torch
def gen_random_output():
UpperCamelCase : Optional[Any] = torch.nn.Linear(3 , 2 )
UpperCamelCase : Dict = torch.rand(1 , 3 )
return model(A_ ).detach().numpy()
with temp_seed(42 , set_pytorch=A_ ):
UpperCamelCase : Dict = gen_random_output()
with temp_seed(42 , set_pytorch=A_ ):
UpperCamelCase : Optional[int] = gen_random_output()
UpperCamelCase : List[Any] = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __UpperCamelCase( self ):
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
UpperCamelCase : Optional[Any] = gen_random_output()
with temp_seed(42 ):
UpperCamelCase : Optional[Any] = gen_random_output()
UpperCamelCase : Optional[Any] = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def A_ ( input_data ) -> List[Any]:
    """`NestedDataStructure.data` should expose the wrapped input unchanged.

    Fixes: the parameter was obfuscated to `_lowerCAmelCase`, which breaks
    pytest parametrization (argname mismatch) and left both `output_data`
    and `input_data` undefined in the assertion.
    """
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def A_ ( data , expected_output ) -> Tuple:
    """`NestedDataStructure.flatten` should flatten arbitrarily nested
    lists/dicts into a flat list of leaves.

    Fixes: the obfuscated signature repeated one parameter name (a
    SyntaxError), did not match the parametrize argnames, and the
    assertion read the undefined name `output`.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def A_ ( ) -> List[Any]:
    """`asdict` should convert dataclass instances — including instances
    nested inside dicts/lists — to plain dicts, and reject a top-level
    non-dataclass input.

    Fixes: the obfuscated body assigned every value to `UpperCamelCase`
    while the assertions read `expected_output`, and `pytest.raises` was
    given the undefined name `_lowerCAmelCase` (upstream expects TypeError).
    """
    # NOTE(review): `A` should be the two-field dataclass defined above
    # (obfuscated here to `A__`) — confirm the intended class name.
    record = A(x=1 , y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(record ) == expected_output
    nested = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(nested ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="foo" )] )
def A_ ( _lowerCAmelCase ) -> Tuple:
return text.split()
def A_ ( _lowerCAmelCase ) -> Dict:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def A_ ( ) -> str:
with Pool(2 ) as pool:
UpperCamelCase : List[str] = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(_lowerCAmelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
UpperCamelCase : Dict = list(iflatmap_unordered(_lowerCAmelCase , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(_lowerCAmelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
UpperCamelCase : Any = []
for yield_time, content in iflatmap_unordered(
_lowerCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(_lowerCAmelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(_lowerCAmelCase ) == 4
| 38
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowerCamelCase : Dict = logging.getLogger(__name__)
class A__ ( __snake_case ):
_UpperCAmelCase :Any = 'sequence-classification'
def __init__( self , A_ ):
'''simple docstring'''
if type(A_ ) == dict:
UpperCamelCase : Any = Namespace(**A_ )
UpperCamelCase : Optional[int] = glue_output_modes[hparams.task]
UpperCamelCase : Any = glue_tasks_num_labels[hparams.task]
super().__init__(A_ , A_ , self.mode )
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
return self.model(**A_ )
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Any = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCamelCase : Optional[int] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
UpperCamelCase : List[Any] = self(**A_ )
UpperCamelCase : Dict = outputs[0]
UpperCamelCase : Optional[Any] = self.trainer.lr_schedulers[0]["scheduler"]
UpperCamelCase : int = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.hparams
UpperCamelCase : Tuple = processors[args.task]()
UpperCamelCase : List[str] = processor.get_labels()
for mode in ["train", "dev"]:
UpperCamelCase : str = self._feature_file(A_ )
if os.path.exists(A_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , A_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
UpperCamelCase : Optional[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
UpperCamelCase : Any = convert_examples_to_features(
A_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , A_ )
torch.save(A_ , A_ )
def __UpperCamelCase( self , A_ , A_ , A_ = False ):
'''simple docstring'''
UpperCamelCase : Dict = "dev" if mode == "test" else mode
UpperCamelCase : str = self._feature_file(A_ )
logger.info("Loading features from cached file %s" , A_ )
UpperCamelCase : Dict = torch.load(A_ )
UpperCamelCase : List[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase : Tuple = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
UpperCamelCase : Optional[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
UpperCamelCase : List[str] = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
UpperCamelCase : List[str] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(A_ , A_ , A_ , A_ ) , batch_size=A_ , shuffle=A_ , )
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCamelCase : Union[str, Any] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
UpperCamelCase : Tuple = self(**A_ )
UpperCamelCase : Dict = outputs[:2]
UpperCamelCase : Dict = logits.detach().cpu().numpy()
UpperCamelCase : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
UpperCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
UpperCamelCase : Any = np.argmax(A_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
UpperCamelCase : List[str] = np.squeeze(A_ )
UpperCamelCase : Union[str, Any] = np.concatenate([x["target"] for x in outputs] , axis=0 )
UpperCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase : Any = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase : List[str] = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , A_ , A_ )}
UpperCamelCase : Optional[Any] = dict(results.items() )
UpperCamelCase : str = results
return ret, preds_list, out_label_list
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = self._eval_end(A_ )
UpperCamelCase : Union[str, Any] = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = self._eval_end(A_ )
UpperCamelCase : List[Any] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(A_ , A_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=A_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=A_ , required=A_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=A_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def A_ ( ) -> Optional[int]:
    """Entry point: parse CLI args, train the GLUE transformer, and
    optionally evaluate the best checkpoint on the dev set.

    Fixes: the obfuscated body assigned `parser`/`args`/`model`/`trainer`/
    `checkpoints` to the throwaway name `UpperCamelCase` and passed the
    undefined `_lowerCAmelCase` for `recursive=` (upstream passes True).
    """
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results" , F"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , )
        os.makedirs(args.output_dir )
    # NOTE(review): `GLUETransformer` is not defined under that name in this
    # obfuscated file (the class above is `A__`) — confirm intended name.
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
| 719
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['note_seq']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["note_seq"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
| 38
| 0
|
from __future__ import annotations
def A_ ( _lowerCAmelCase ) -> float:
    """Return the arithmetic mean of a non-empty sequence of numbers.

    Raises:
        ValueError: if the sequence is empty.

    Fix: the obfuscated body read the undefined name `nums` instead of the
    parameter.
    """
    if not _lowerCAmelCase:
        raise ValueError("List is empty" )
    return sum(_lowerCAmelCase ) / len(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
import math
import tensorflow as tf
from packaging import version
def A_ ( _lowerCAmelCase ) -> Any:
    """Exact Gaussian Error Linear Unit: x * 0.5 * (1 + erf(x / sqrt(2))).

    Fix: the obfuscated body assigned the tensor and CDF to a throwaway
    name while reading the undefined `x`/`cdf`.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def A_ ( _lowerCAmelCase ) -> Dict:
    """Tanh-based GELU approximation (the GPT-2 "gelu_new" variant):
    x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).

    Fix: the obfuscated body assigned every local to a throwaway name while
    reading the undefined `x`/`pi`/`coeff`/`cdf`.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044_715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Mish activation: x * tanh(softplus(x)).

    Fix: the obfuscated body assigned the converted tensor to a throwaway
    name while reading the undefined `x`.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    return x * tf.tanh(tf.math.softplus(x ) )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Fast GELU approximation:
    0.5 * x * (1 + tanh(x * c2 * (1 + c1 * x * x))) with
    c1 = 0.044715 and c2 = sqrt(2/pi) ≈ 0.7978845608.

    Fixes: locals were assigned to a throwaway name, and the expression
    reused one coefficient (`coeffa`) in both roles; restored the two
    distinct coefficients per the upstream `gelu_fast` implementation.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff1 = tf.cast(0.044_715 , x.dtype )
    coeff2 = tf.cast(0.7_978_845_608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x) ))
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Quick GELU: x * sigmoid(1.702 * x).

    Fix: the obfuscated body assigned the tensor and coefficient to a
    throwaway name while reading the undefined `x`/`coeff`.
    """
    x = tf.convert_to_tensor(_lowerCAmelCase )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def A_ ( _lowerCAmelCase ) -> List[Any]:
return tf.clip_by_value(_gelu(_lowerCAmelCase ) , -10 , 10 )
def A_ ( x , axis=-1 ) -> str:
    """Gated Linear Unit: split `x` into halves (a, b) along `axis` and
    return a * sigmoid(b).

    Fixes: the obfuscated signature repeated one parameter name (a
    SyntaxError) and the tuple-unpack targets were destroyed, leaving `a`
    undefined and gating on the whole input.  Callers pass positionally,
    so the rename is backward compatible.
    """
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
def A_ ( _lowerCAmelCase ) -> Any:
return tf.keras.activations.gelu(_lowerCAmelCase , approximate=_lowerCAmelCase )
__lowerCamelCase : Optional[int] = tf.keras.activations.gelu
__lowerCamelCase : int = approximate_gelu_wrap
else:
__lowerCamelCase : List[Any] = _gelu
__lowerCamelCase : Optional[Any] = _gelu_new
__lowerCamelCase : Any = {
"""gelu""": gelu,
"""gelu_10""": gelu_aa,
"""gelu_fast""": gelu_fast,
"""gelu_new""": gelu_new,
"""glu""": glu,
"""mish""": mish,
"""quick_gelu""": quick_gelu,
"""relu""": tf.keras.activations.relu,
"""sigmoid""": tf.keras.activations.sigmoid,
"""silu""": tf.keras.activations.swish,
"""swish""": tf.keras.activations.swish,
"""tanh""": tf.keras.activations.tanh,
}
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
    """Resolve an activation function by name from the ACT2FN table.

    Raises:
        KeyError: if the name is not present in the mapping.

    Fix: the obfuscated body read the undefined name `activation_string`
    instead of the parameter.
    """
    if _lowerCAmelCase in ACTaFN:
        return ACTaFN[_lowerCAmelCase]
    else:
        raise KeyError(F"""function {_lowerCAmelCase} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
| 38
| 0
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A__ ( __snake_case ):
'''simple docstring'''
_UpperCAmelCase :Any = ['input_values', 'padding_mask']
def __init__( self , A_ = 1 , A_ = 2_4000 , A_ = 0.0 , A_ = None , A_ = None , **A_ , ):
'''simple docstring'''
super().__init__(feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ )
UpperCamelCase : Any = chunk_length_s
UpperCamelCase : Dict = overlap
@property
def __UpperCamelCase( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __UpperCamelCase( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , A_ , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[Any] = bool(
isinstance(A_ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
UpperCamelCase : Optional[Any] = [np.asarray(A_ , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(A_ , np.ndarray ):
UpperCamelCase : List[str] = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
UpperCamelCase : Tuple = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Dict = [np.asarray(A_ ).T]
# verify inputs are valid
for idx, example in enumerate(A_ ):
if example.ndim > 2:
raise ValueError(F"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"""Expected stereo audio but example has {example.shape[-1]} channels""" )
UpperCamelCase : int = None
UpperCamelCase : Any = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
UpperCamelCase : Dict = min(array.shape[0] for array in raw_audio )
UpperCamelCase : Optional[Any] = int(np.floor(max_length / self.chunk_stride ) )
UpperCamelCase : str = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
UpperCamelCase : Any = max(array.shape[0] for array in raw_audio )
UpperCamelCase : str = int(np.ceil(max_length / self.chunk_stride ) )
UpperCamelCase : int = (nb_step - 1) * self.chunk_stride + self.chunk_length
UpperCamelCase : Optional[int] = "max_length"
else:
UpperCamelCase : int = input_values
# normal padding on batch
if padded_inputs is None:
UpperCamelCase : Optional[int] = self.pad(
A_ , max_length=A_ , truncation=A_ , padding=A_ , return_attention_mask=A_ , )
if padding:
UpperCamelCase : int = padded_inputs.pop("attention_mask" )
UpperCamelCase : Optional[int] = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
UpperCamelCase : Dict = example[..., None]
input_values.append(example.T )
UpperCamelCase : int = input_values
if return_tensors is not None:
UpperCamelCase : Optional[Any] = padded_inputs.convert_to_tensors(A_ )
return padded_inputs
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
    """Fast, dummy-weight tests for the Kandinsky 2.2 text-to-image decoder pipeline.

    NOTE(review): identifiers in this file were mangled by an automated rewrite —
    every property/method below is named ``__UpperCamelCase``, config attributes are
    all ``_UpperCAmelCase``, and most locals are bound to the throwaway name
    ``UpperCamelCase`` while later lines read the original names.  Lookups such as
    ``self.time_input_dim`` or ``self.dummy_unet`` therefore cannot resolve as
    written, and one method signature repeats the parameter name ``A_`` (a
    SyntaxError).  Verify against the upstream diffusers test module.
    """

    # Pipeline class under test.
    _UpperCAmelCase :str = KandinskyVaaPipeline
    # Required call-argument names exercised by the tester mixin.
    _UpperCAmelCase :str = [
        'image_embeds',
        'negative_image_embeds',
    ]
    _UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds']
    # Optional call-argument names (duplicate entries preserved from the original).
    _UpperCAmelCase :List[str] = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    _UpperCAmelCase :List[str] = False

    @property
    def __UpperCamelCase( self ):
        """Hidden size used for the dummy embedders."""
        return 32

    @property
    def __UpperCamelCase( self ):
        """Time-embedding input dimension of the dummy UNet."""
        return 32

    @property
    def __UpperCamelCase( self ):
        """Base block channel count (same as the time input dim)."""
        return self.time_input_dim

    @property
    def __UpperCamelCase( self ):
        """Time-embedding projection dimension (4x the input dim)."""
        return self.time_input_dim * 4

    @property
    def __UpperCamelCase( self ):
        """Cross-attention dimension of the dummy UNet."""
        return 100

    @property
    def __UpperCamelCase( self ):
        """Build a small, seeded UNet2DConditionModel for testing."""
        torch.manual_seed(0 )
        UpperCamelCase : List[str] = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        UpperCamelCase : Dict = UNetaDConditionModel(**A_ )
        return model

    @property
    def __UpperCamelCase( self ):
        """Constructor kwargs for the dummy movq (VQ) autoencoder."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __UpperCamelCase( self ):
        """Build a small, seeded VQModel (movq) for testing."""
        torch.manual_seed(0 )
        UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
        return model

    def __UpperCamelCase( self ):
        """Assemble the pipeline components: dummy UNet, DDIM scheduler, dummy movq."""
        UpperCamelCase : Tuple = self.dummy_unet
        UpperCamelCase : Optional[Any] = self.dummy_movq
        UpperCamelCase : Dict = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
        UpperCamelCase : Tuple = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def __UpperCamelCase( self , A_ , A_=0 ):
        """Create deterministic pipeline call kwargs for a device and seed."""
        UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
        UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            A_ )
        # MPS does not support device-local generators; fall back to the global seed.
        if str(A_ ).startswith("mps" ):
            UpperCamelCase : Optional[Any] = torch.manual_seed(A_ )
        else:
            UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
        UpperCamelCase : Optional[int] = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def __UpperCamelCase( self ):
        """Run the pipeline on CPU and compare a 3x3 corner slice to reference values."""
        UpperCamelCase : Optional[Any] = "cpu"
        UpperCamelCase : List[str] = self.get_dummy_components()
        UpperCamelCase : Tuple = self.pipeline_class(**A_ )
        UpperCamelCase : List[str] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) )
        UpperCamelCase : Optional[int] = output.images
        # Same call with return_dict=False must produce the same image tuple.
        UpperCamelCase : int = pipe(
            **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
        UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
        UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase : int = np.array(
            [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration test: full Kandinsky 2.2 prior + decoder run compared
    against a stored reference image.

    NOTE(review): method names were mangled to ``__UpperCamelCase`` and most local
    bindings to ``UpperCamelCase``, so names read below (``pipe_prior``,
    ``pipeline``, ``output``, ``image``) are never bound as written — verify
    against the upstream test module.
    """

    def __UpperCamelCase( self ):
        """Release Python and CUDA memory after each test (tearDown in the original)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase( self ):
        """End-to-end text-to-image generation checked against a reference array."""
        UpperCamelCase : Dict = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(A_ )
        UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
        UpperCamelCase : Tuple = pipeline.to(A_ )
        pipeline.set_progress_bar_config(disable=A_ )
        UpperCamelCase : str = "red cat, 4k photo"
        UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 )
        UpperCamelCase , UpperCamelCase : Tuple = pipe_prior(
            A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 )
        UpperCamelCase : Tuple = pipeline(
            image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , )
        UpperCamelCase : Union[str, Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(A_ , A_ )
| 38
| 0
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# The script relies on the cfg-based model layout introduced in fairseq 1.0.0a.
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
    raise Exception("""requires fairseq >= 1.0.0a""")

logging.set_verbosity_info()
__lowerCamelCase : List[Any] = logging.get_logger(__name__)

# Sample text fed through both models to verify the conversion produced
# identical outputs (non-ASCII characters exercise the tokenizer).
__lowerCamelCase : Tuple = """Hello world! cécé herlolip"""
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
    """Convert a fairseq XLM-RoBERTa-XL checkpoint into a Hugging Face model, check
    that both produce the same outputs, then save the converted model.

    NOTE(review): mangled by an automated rewrite — the three parameters share one
    name (a SyntaxError; upstream: roberta_checkpoint_path,
    pytorch_dump_folder_path, classification_head), and nearly every assignment
    target was replaced by the throwaway name ``UpperCamelCase``, so the weight
    copies never reach the HF model as written.  Verify against the upstream
    conversion script before use.
    """
    UpperCamelCase : List[Any] = FairseqRobertaModel.from_pretrained(_lowerCAmelCase )
    roberta.eval()  # disable dropout
    UpperCamelCase : Any = roberta.model.encoder.sentence_encoder
    UpperCamelCase : int = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        # Number of labels is taken from the MNLI classification head.
        UpperCamelCase : Dict = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:" , _lowerCAmelCase )
    UpperCamelCase : Any = XLMRobertaXLForSequenceClassification(_lowerCAmelCase ) if classification_head else XLMRobertaXLForMaskedLM(_lowerCAmelCase )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    UpperCamelCase : List[Any] = roberta_sent_encoder.embed_tokens.weight
    UpperCamelCase : Dict = roberta_sent_encoder.embed_positions.weight
    UpperCamelCase : Union[str, Any] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    UpperCamelCase : Tuple = roberta_sent_encoder.layer_norm.weight
    UpperCamelCase : Union[str, Any] = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        UpperCamelCase : BertLayer = model.roberta.encoder.layer[i]
        UpperCamelCase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        UpperCamelCase : RobertaAttention = layer.attention
        UpperCamelCase : int = roberta_layer.self_attn_layer_norm.weight
        UpperCamelCase : Union[str, Any] = roberta_layer.self_attn_layer_norm.bias
        # self attention
        UpperCamelCase : BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        UpperCamelCase : Dict = roberta_layer.self_attn.q_proj.weight
        UpperCamelCase : Optional[int] = roberta_layer.self_attn.q_proj.bias
        UpperCamelCase : Optional[Any] = roberta_layer.self_attn.k_proj.weight
        UpperCamelCase : int = roberta_layer.self_attn.k_proj.bias
        UpperCamelCase : Optional[int] = roberta_layer.self_attn.v_proj.weight
        UpperCamelCase : Any = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        UpperCamelCase : BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        UpperCamelCase : str = roberta_layer.self_attn.out_proj.weight
        UpperCamelCase : Tuple = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        UpperCamelCase : Any = roberta_layer.final_layer_norm.weight
        UpperCamelCase : Dict = roberta_layer.final_layer_norm.bias
        # intermediate
        UpperCamelCase : BertIntermediate = layer.intermediate
        # NOTE(review): both the intermediate and the output block below read
        # ``roberta_layer.fca``; upstream uses fc1 here and fc2 below — looks
        # like a mangled numeric rename, confirm against the original script.
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        UpperCamelCase : List[str] = roberta_layer.fca.weight
        UpperCamelCase : Union[str, Any] = roberta_layer.fca.bias
        # output
        UpperCamelCase : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        UpperCamelCase : str = roberta_layer.fca.weight
        UpperCamelCase : Dict = roberta_layer.fca.bias
        # end of layer
    if classification_head:
        UpperCamelCase : Dict = roberta.model.classification_heads["mnli"].dense.weight
        UpperCamelCase : List[str] = roberta.model.classification_heads["mnli"].dense.bias
        UpperCamelCase : int = roberta.model.classification_heads["mnli"].out_proj.weight
        UpperCamelCase : Dict = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        UpperCamelCase : Union[str, Any] = roberta.model.encoder.lm_head.dense.weight
        UpperCamelCase : Union[str, Any] = roberta.model.encoder.lm_head.dense.bias
        UpperCamelCase : Tuple = roberta.model.encoder.lm_head.layer_norm.weight
        UpperCamelCase : Tuple = roberta.model.encoder.lm_head.layer_norm.bias
        UpperCamelCase : Optional[Any] = roberta.model.encoder.lm_head.weight
        UpperCamelCase : Tuple = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    UpperCamelCase : torch.Tensor = roberta.encode(_lowerCAmelCase ).unsqueeze(0 )  # batch of size 1
    UpperCamelCase : str = model(_lowerCAmelCase )[0]
    if classification_head:
        UpperCamelCase : str = roberta.model.classification_heads["mnli"](roberta.extract_features(_lowerCAmelCase ) )
    else:
        UpperCamelCase : Union[str, Any] = roberta.model(_lowerCAmelCase )[0]
    print(our_output.shape , their_output.shape )
    UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    UpperCamelCase : str = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    pathlib.Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
    # CLI entry point.  Fixes from review: the parser/args objects were bound to a
    # mangled throwaway name while later lines read ``parser``/``args``, and the
    # call used ``convert_xlm_roberta_xl_checkpoint_to_pytorch``, which this file
    # defines under the (mangled) name ``A_`` — all three raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
    )
    args = parser.parse_args()
    A_(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 700
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A_ ( ):
    """Parse the TPU launcher's command line.

    Returns the ``argparse.Namespace`` with ``num_cores`` (int, default 1),
    ``training_script`` (path to the script to spawn) and
    ``training_script_args`` (everything after it, captured via REMAINDER).

    Fixes from review: the parser instance was bound to a mangled throwaway name
    while later lines read ``parser`` (NameError); ``type=``/``nargs=`` values had
    been mangled to an undefined name (restored to ``int``/``str``/``REMAINDER``);
    and the ``-> Dict`` annotation referenced an unimported name (and was wrong —
    the function returns a Namespace), so it was dropped.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def A_ ( ) -> None:
    """Module entry point: import the target training script as a module and spawn
    its ``_mp_fn`` across the requested number of TPU cores.

    NOTE(review): mangled rewrite — ``parse_args`` is not defined in this file
    (the parser function above was renamed to ``A_``, which this definition then
    shadows), and locals read below (``args``, ``script_fpath``, ``mod_name``,
    ``mod``) were bound to the throwaway name ``UpperCamelCase``.  The original
    ``-> Optional[int]`` annotation referenced an unimported name; corrected to
    ``None`` since no value is returned.
    """
    UpperCamelCase : Tuple = parse_args()
    # Import training_script as a module.
    UpperCamelCase : Union[str, Any] = Path(args.training_script )
    # Make the script's directory importable so its module name resolves.
    sys.path.append(str(script_fpath.parent.resolve() ) )
    UpperCamelCase : List[Any] = script_fpath.stem
    UpperCamelCase : Optional[Any] = importlib.import_module(_lowerCAmelCase )
    # Patch sys.argv
    UpperCamelCase : List[Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    # Fix from review: the entry point above is defined under the (mangled) name
    # ``A_``; the original call target ``main`` does not exist in this file and
    # raised NameError.
    A_()
| 38
| 0
|
import argparse
import os
import re
__lowerCamelCase : Any = """src/diffusers"""

# NOTE(review): the regex constants below were all rebound to the mangled name
# ``__lowerCamelCase``; the functions in this module read them under their
# original names (``_re_indent``, ``_re_direct_key``, ``_re_indirect_key``,
# ``_re_strip_line``, ``_re_bracket_content``), which are therefore undefined
# as written — verify against the upstream utility.
# Pattern that looks at the indentation in a line.
__lowerCamelCase : Optional[int] = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
__lowerCamelCase : Optional[Any] = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowerCamelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
__lowerCamelCase : Optional[Any] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowerCamelCase : List[Any] = re.compile(r"""\[([^\]]+)\]""")
def A_ ( _lowerCAmelCase ):
    """Return the leading whitespace of a line, or an empty string for blank lines.

    Fixes from review: the module-level ``_re_indent`` pattern was renamed by an
    automated rewrite and the local match object was bound to a throwaway name,
    so both reads raised NameError.  The pattern is compiled locally instead
    (``re`` caches compiled patterns, so this is cheap).
    """
    search = re.compile(r"^(\s*)\S" ).search(_lowerCAmelCase )
    return "" if search is None else search.groups()[0]
def A_ ( code , indent_level="" , start_prompt=None , end_prompt=None ):
    """Split *code* into blocks of lines at the given indentation level.

    Lines more indented than ``indent_level`` are grouped with the block that
    opened them; an optional ``start_prompt``/``end_prompt`` pair delimits the
    region to split, with everything before/after kept as single blocks.
    Returns the list of blocks (each a ``"\\n"``-joined string).

    Fixes from review: all four parameters shared one mangled name (a
    SyntaxError) and every local was bound to a throwaway name while later lines
    read the original names; parameter and local names are restored, and the
    indent helper (renamed at module level by the rewrite) is provided locally.
    """

    def _get_indent(line ):
        # Leading whitespace of a line, "" for blank lines.
        m = re.search(r"^(\s*)\S" , line )
        return "" if m is None else m.groups()[0]

    index = 0
    lines = code.split("\n" )
    if start_prompt is not None:
        # Skip ahead to the start prompt; everything before it is one block.
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ["\n".join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and _get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and _get_indent(current_block[-1] ).startswith(indent_level + " " ):
                # A dedented line closes an indented block and is included in it.
                current_block.append(lines[index] )
                blocks.append("\n".join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append("\n".join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append("\n".join(lines[index:] ) )
    return blocks
def A_ ( _lowerCAmelCase ):
    """Wrap a key function so comparisons are lowercase and ignore underscores.

    Fix from review: the inner closure read the name ``key`` but the outer
    parameter had been mangled to ``_lowerCAmelCase``, leaving ``key`` undefined
    (NameError on every call of the returned function).
    """
    key = _lowerCAmelCase  # restore the name the closure reads

    def _inner(_lowerCAmelCase ):
        return key(_lowerCAmelCase ).lower().replace("_" , "" )

    return _inner
def A_ ( objects , key=None ):
    """Sort *objects* the way an import block lists them: all-uppercase constants
    first, then capitalized class names, then lowercase function names; each
    group sorted case-insensitively ignoring underscores.

    ``key`` optionally maps each object to the string used for classification
    and ordering (identity by default).

    Fixes from review: the two parameters shared one mangled name (a
    SyntaxError), ``noop`` returned an undefined name, the group comprehensions
    filtered on an undefined name instead of each object, and the
    underscore-ignoring helper (renamed at module level by the rewrite) is now
    provided locally.
    """

    # If no key is provided, we use a noop.
    def noop(obj ):
        return obj

    if key is None:
        key = noop

    def ignore_underscore(key_fn ):
        # Compare case-insensitively and without underscores.
        def _inner(obj ):
            return key_fn(obj ).lower().replace("_" , "" )

        return _inner

    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    sort_key = ignore_underscore(key )
    return sorted(constants , key=sort_key ) + sorted(classes , key=sort_key ) + sorted(functions , key=sort_key )
def A_ ( _lowerCAmelCase ):
    """Sort the object names inside a single import statement, whether it spans
    one line, three lines, or one line per name.

    NOTE(review): mangled rewrite — the helpers ``sort_objects``/``get_indent``
    and the module regexes ``_re_bracket_content``/``_re_strip_line`` are not
    defined under those names in this file, and locals read below (``match``,
    ``imports``, ``keys``, ``lines``, ``idx``, ``sorted_indices``,
    ``sorted_lines``, ``import_statement``) were bound to the throwaway name
    ``UpperCamelCase``.  Verify against the upstream utility.
    """

    # This inner function sort imports between [ ].
    def _replace(_lowerCAmelCase ):
        UpperCamelCase : Any = match.groups()[0]
        if "," not in imports:
            return F"""[{imports}]"""
        UpperCamelCase : Dict = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            UpperCamelCase : Tuple = keys[:-1]
        return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(_lowerCAmelCase )] ) + "]"

    UpperCamelCase : Optional[int] = import_statement.split("\n" )
    if len(_lowerCAmelCase ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        UpperCamelCase : List[Any] = 2 if lines[1].strip() == "[" else 1
        UpperCamelCase : Tuple = [(i, _re_strip_line.search(_lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        UpperCamelCase : int = sort_objects(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] )
        UpperCamelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(_lowerCAmelCase ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            UpperCamelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            UpperCamelCase : Any = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                UpperCamelCase : Union[str, Any] = keys[:-1]
            UpperCamelCase : Union[str, Any] = get_indent(lines[1] ) + ", ".join([F"""\"{k}\"""" for k in sort_objects(_lowerCAmelCase )] )
        return "\n".join(_lowerCAmelCase )
    else:
        # Finally we have to deal with imports fitting on one line
        UpperCamelCase : Tuple = _re_bracket_content.sub(_replace , _lowerCAmelCase )
        return import_statement
def A_ ( _lowerCAmelCase , _lowerCAmelCase=True ):
    """Sort the ``_import_structure`` entries of one ``__init__.py``: return True
    if the file would change and check-only mode is set, otherwise rewrite the
    file in place.

    NOTE(review): mangled rewrite — the two parameters share one name (a
    SyntaxError; upstream: file, check_only), helpers such as
    ``split_code_in_indented_blocks``/``get_indent``/``sort_objects_in_import``
    and the ``_re_direct_key``/``_re_indirect_key`` regexes are not defined
    under those names in this file, and many locals (``code``, ``main_blocks``,
    ``block``, ``block_lines``, ``line_idx``, ``internal_blocks``, ``pattern``,
    ``keys``, ``sorted_indices``, ``count``, ``reordered_blocks``, ``file``,
    ``check_only``) are read but were bound to the throwaway name
    ``UpperCamelCase``.  Verify against the upstream utility.
    """
    with open(_lowerCAmelCase , "r" ) as f:
        UpperCamelCase : List[str] = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    UpperCamelCase : int = split_code_in_indented_blocks(
        _lowerCAmelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(_lowerCAmelCase ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        UpperCamelCase : Optional[int] = main_blocks[block_idx]
        UpperCamelCase : List[str] = block.split("\n" )
        # Get to the start of the imports.
        UpperCamelCase : List[Any] = 0
        while line_idx < len(_lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                UpperCamelCase : int = len(_lowerCAmelCase )
            else:
                line_idx += 1
        if line_idx >= len(_lowerCAmelCase ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        UpperCamelCase : Any = "\n".join(block_lines[line_idx:-1] )
        UpperCamelCase : Optional[int] = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        UpperCamelCase : Union[str, Any] = split_code_in_indented_blocks(_lowerCAmelCase , indent_level=_lowerCAmelCase )
        # We have two categories of import key: list or _import_structure[key].append/extend
        UpperCamelCase : Dict = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        UpperCamelCase : List[Any] = [(pattern.search(_lowerCAmelCase ).groups()[0] if pattern.search(_lowerCAmelCase ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        UpperCamelCase : List[str] = [(i, key) for i, key in enumerate(_lowerCAmelCase ) if key is not None]
        UpperCamelCase : int = [x[0] for x in sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        UpperCamelCase : List[Any] = 0
        UpperCamelCase : Optional[int] = []
        for i in range(len(_lowerCAmelCase ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                UpperCamelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(_lowerCAmelCase )
                count += 1
        # And we put our main block back together with its first and last line.
        UpperCamelCase : Tuple = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(_lowerCAmelCase ):
        if check_only:
            return True
        else:
            print(F"""Overwriting {file}.""" )
            with open(_lowerCAmelCase , "w" ) as f:
                f.write("\n".join(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase=True ):
    """Run the import sorter on every ``__init__.py`` under the repository root and
    raise if any file would change.

    NOTE(review): mangled rewrite — ``os.walk`` is called on the boolean
    check-only parameter rather than the repository-root constant defined at the
    top of the module, ``sort_imports`` is not defined under that name in this
    file, and the failure list read at the end is rebound rather than extended
    inside the loop (presumably it should accumulate per-file failures —
    confirm against the upstream utility).
    """
    UpperCamelCase : List[Any] = []
    for root, _, files in os.walk(_lowerCAmelCase ):
        if "__init__.py" in files:
            UpperCamelCase : str = sort_imports(os.path.join(_lowerCAmelCase , "__init__.py" ) , check_only=_lowerCAmelCase )
            if result:
                UpperCamelCase : str = [os.path.join(_lowerCAmelCase , "__init__.py" )]
    if len(_lowerCAmelCase ) > 0:
        raise ValueError(F"""Would overwrite {len(_lowerCAmelCase )} files, run `make style`.""" )
if __name__ == "__main__":
    # Fixes from review: the parser/args objects were bound to a mangled throwaway
    # name while later lines read ``parser``/``args``, and the call target
    # ``sort_imports_in_all_inits`` is defined in this file under the (mangled)
    # name ``A_`` — all raised NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
    A_(check_only=args.check_only)
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the vision-encoder-decoder package: one entry per
# backend, populated only when that backend is importable.
# NOTE(review): every assignment below was rebound to the mangled name
# ``__lowerCamelCase``, so the ``_import_structure`` handed to ``_LazyModule`` at
# the bottom is undefined as written — verify against the upstream __init__.
__lowerCamelCase : Union[str, Any] = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}

# PyTorch-only symbols.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Dict = ["""VisionEncoderDecoderModel"""]

# TensorFlow-only symbols.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[str] = ["""TFVisionEncoderDecoderModel"""]

# Flax-only symbols.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : int = ["""FlaxVisionEncoderDecoderModel"""]

if TYPE_CHECKING:
    # Direct imports for static type checkers; mirrors the structure above.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    # At runtime, replace this module with a lazy loader.
    import sys

    __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 38
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)

# Pretrained-config archive map for MobileNetV1 checkpoints.
# NOTE(review): bound to a mangled throwaway name by an automated rewrite.
__lowerCamelCase : int = {
    """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class A__ ( __snake_case ):
    """Configuration class for MobileNetV1-style models (model_type ``mobilenet_v1``).

    NOTE(review): mangled rewrite — the ``__init__`` parameters all share the name
    ``A_`` (a SyntaxError), the values read below (``depth_multiplier``,
    ``num_channels``, ...) are never bound as written, every attribute assignment
    targets the throwaway name ``UpperCamelCase`` instead of ``self.<attr>``, and
    the base class name ``__snake_case`` is undefined here.  Verify against the
    upstream configuration module.
    """

    # Model identifier used for auto-class lookup.
    _UpperCAmelCase :List[Any] = 'mobilenet_v1'

    def __init__( self , A_=3 , A_=224 , A_=1.0 , A_=8 , A_="relu6" , A_=True , A_=0.9_99 , A_=0.02 , A_=0.0_01 , **A_ , ):
        """Store the MobileNetV1 hyper-parameters; rejects non-positive depth multipliers."""
        super().__init__(**A_ )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        UpperCamelCase : List[str] = num_channels
        UpperCamelCase : Any = image_size
        UpperCamelCase : Optional[Any] = depth_multiplier
        UpperCamelCase : Union[str, Any] = min_depth
        UpperCamelCase : int = hidden_act
        UpperCamelCase : Union[str, Any] = tf_padding
        UpperCamelCase : Optional[Any] = classifier_dropout_prob
        UpperCamelCase : int = initializer_range
        UpperCamelCase : Tuple = layer_norm_eps
class A__ ( __snake_case ):
    """ONNX export configuration for MobileNetV1.

    NOTE(review): the three properties below all carry the same mangled name
    ``__UpperCamelCase``, so within this class body each definition rebinds the
    previous one and only the last survives — upstream these are ``inputs``,
    ``outputs`` and ``atol_for_validation``; confirm against the upstream module.
    """

    # Minimum torch version required for ONNX export (upstream attribute name
    # is presumably ``torch_onnx_minimum_version`` — confirm).
    _UpperCAmelCase :Any = version.parse('1.11' )

    @property
    def __UpperCamelCase( self ):
        """Input spec: one batched ``pixel_values`` tensor."""
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def __UpperCamelCase( self ):
        """Output spec, depending on whether the export task is image classification."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def __UpperCamelCase( self ):
        """Absolute tolerance used when validating the exported model outputs."""
        return 1e-4
| 702
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class A__ ( unittest.TestCase ):
    """Helper that holds the settings used to build dummy video inputs for the
    Vivit image-processor tests.

    NOTE(review): mangled rewrite — the ``__init__`` parameters all share the
    name ``A_`` (a SyntaxError), the values read below (``size``, ``parent``,
    ``batch_size``, ...) are never bound as written, and every attribute
    assignment targets the throwaway name ``UpperCamelCase`` instead of
    ``self.<attr>``, so the dict method's ``self.image_mean`` etc. would not be
    set.  Verify against the upstream test module.
    """

    def __init__( self , A_ , A_=7 , A_=3 , A_=10 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=None , ):
        """Record the tester configuration (batch/frame counts, sizes, normalization stats)."""
        UpperCamelCase : Optional[int] = size if size is not None else {"shortest_edge": 18}
        UpperCamelCase : Tuple = crop_size if crop_size is not None else {"height": 18, "width": 18}
        UpperCamelCase : Optional[Any] = parent
        UpperCamelCase : Optional[int] = batch_size
        UpperCamelCase : List[Any] = num_channels
        UpperCamelCase : Union[str, Any] = num_frames
        UpperCamelCase : Any = image_size
        UpperCamelCase : Tuple = min_resolution
        UpperCamelCase : Optional[Any] = max_resolution
        UpperCamelCase : Any = do_resize
        UpperCamelCase : Tuple = size
        UpperCamelCase : List[Any] = do_normalize
        UpperCamelCase : Optional[int] = image_mean
        UpperCamelCase : Any = image_std
        UpperCamelCase : str = crop_size

    def __UpperCamelCase( self ):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class A__ ( __snake_case , unittest.TestCase ):
    """Tests for ``VivitImageProcessor`` over PIL, NumPy and torch video inputs.

    NOTE(review): mangled rewrite — all test methods share the name
    ``__UpperCamelCase`` (each definition rebinds the previous one, so only the
    last would be collected), locals are bound to the throwaway name
    ``UpperCamelCase`` while later lines read the original names
    (``image_processing``, ``video_inputs``, ``encoded_videos``,
    ``image_processor``), and ``VivitImageProcessingTester`` /
    ``self.image_processor_tester`` / ``self.image_processor_dict`` are not
    defined under those names here.  Verify against the upstream test module.
    """

    # Image-processor class under test (None when vision deps are missing).
    _UpperCAmelCase :List[str] = VivitImageProcessor if is_vision_available() else None

    def __UpperCamelCase( self ):
        """setUp: build the shared tester fixture."""
        UpperCamelCase : List[Any] = VivitImageProcessingTester(self )

    @property
    def __UpperCamelCase( self ):
        """Kwargs used to instantiate the processor in each test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def __UpperCamelCase( self ):
        """The processor exposes the expected configuration attributes."""
        UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A_ , "image_mean" ) )
        self.assertTrue(hasattr(A_ , "image_std" ) )
        self.assertTrue(hasattr(A_ , "do_normalize" ) )
        self.assertTrue(hasattr(A_ , "do_resize" ) )
        self.assertTrue(hasattr(A_ , "do_center_crop" ) )
        self.assertTrue(hasattr(A_ , "size" ) )

    def __UpperCamelCase( self ):
        """``from_dict`` honours defaults and keyword overrides for size/crop_size."""
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def __UpperCamelCase( self ):
        """Processing PIL-image videos yields correctly shaped pixel_values."""
        UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        UpperCamelCase : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : str = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __UpperCamelCase( self ):
        """Processing NumPy-array videos yields correctly shaped pixel_values."""
        UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , np.ndarray )
        # Test not batched input
        UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __UpperCamelCase( self ):
        """Processing torch-tensor videos yields correctly shaped pixel_values."""
        UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
        for video in video_inputs:
            self.assertIsInstance(A_ , A_ )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        UpperCamelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 38
| 0
|
import re
from filelock import FileLock
try:
    import nltk

    # Flag consumed by the sentence-splitting helper below.
    # BUG FIX: the original assigned `__lowerCamelCase` here while the code
    # below tests `NLTK_AVAILABLE`, which was therefore never defined.
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Serialize the one-time punkt download across concurrent processes.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def A_ ( _lowerCAmelCase ) -> str:
    """Split a flattened pegasus-style summary into one sentence per line.

    BUG FIX: `re.sub` returns a new string; the original discarded the result,
    so the pegasus "<n>" markers were never removed. Bind it before tokenizing.
    """
    _lowerCAmelCase = re.sub("<n>" , "" , _lowerCAmelCase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_lowerCAmelCase ) )
| 703
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

# BUG FIX: these module constants are referenced by the tokenizer class below
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) but were all bound to the throwaway
# name `__lowerCamelCase`, leaving the real names undefined. Restored the
# canonical names used throughout the transformers codebase.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input sizes (positions) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class A__ ( __snake_case ):
    """Fast (tokenizers-backed) BlenderbotSmall tokenizer.

    BUG FIX: the original collapsed all `__init__` parameters to the duplicate
    name `A_` (a SyntaxError), bound all four class attributes to the single
    name `_UpperCAmelCase` (later assignments clobber earlier ones), gave both
    methods the same name, and never stored `add_prefix_space` on `self`.
    Restored the canonical attribute/method/parameter names the base
    `PreTrainedTokenizerFast` machinery expects.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one (or a pair of) sequence(s) with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return an all-zero token-type mask (BlenderbotSmall does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 38
| 0
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
# Module-level logger (obfuscated binding name; nothing below references it).
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def A_ ( a=2 , b=3 , batch_size=16 , n_train_batches=10 , n_valid_batches=2 ):
    """Build small synthetic train/valid DataLoaders for the regression
    target ``y = a * x + b + noise``.

    BUG FIX: all five parameters were named `_lowerCAmelCase` (duplicate
    argument names are a SyntaxError) while the body read `a`, `b`, `x` and
    `batch_size`, which were unbound. Restored meaningful parameter names;
    defaults are unchanged, so existing no-argument callers still work.
    """

    def get_dataset(n_batches):
        # One column of gaussian inputs plus a noisy linear response.
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )

    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def A_ ( num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    """Run a quick MSE training loop for `num_epochs` and return one random
    float per epoch (used by the checkpoint tests to compare RNG state).

    BUG FIX: all parameters were named `_lowerCAmelCase` (duplicate argument
    names are a SyntaxError) and the batch unpacking was discarded, while the
    body read the unbound names `model`, `dataloader`, `x`, `y`, etc.
    Restored the conventional names.
    """
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class A__ ( nn.Module ):
    """Minimal linear model ``y = x * a + b`` with two scalar parameters.

    BUG FIX: the original assigned the two ``nn.Parameter``s to the throwaway
    local ``UpperCamelCase`` instead of ``self.a`` / ``self.b``, so ``forward``
    raised AttributeError. Store them on the module as intended.
    """

    def __init__( self ):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )

    def forward( self , x ):
        # Element-wise affine map; broadcasting handles any input shape.
        return x * self.a + self.b
class A__ ( unittest.TestCase ):
    """Integration tests for Accelerator checkpoint save/load behavior.

    NOTE(review): an automated rename pass has corrupted this class: every
    result is bound to the throwaway local ``UpperCamelCase`` and subsequent
    lines read names that are never defined in scope (``model``, ``optimizer``,
    ``accelerator``, ``A_``, ``test_rands``, ...), so these tests raise
    NameError/SyntaxError as written. The code is left byte-identical here;
    only comments were added. Restore the original bindings to fix.
    """

    # Test: with total_limit=1 only a single checkpoint is kept on disk.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCamelCase : List[Any] = DummyModel()
            UpperCamelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase : int = dummy_dataloaders()
            UpperCamelCase : Union[str, Any] = ProjectConfiguration(total_limit=1 , project_dir=A_ , automatic_checkpoint_naming=A_ )
            # Train baseline
            UpperCamelCase : Union[str, Any] = Accelerator(project_config=A_ )
            UpperCamelCase : Dict = accelerator.prepare(
                A_ , A_ , A_ , A_ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )

    # Test: explicit save_state/load_state paths round-trip model, optimizer
    # and RNG-driven training results.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCamelCase : List[Any] = DummyModel()
            UpperCamelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase : Dict = dummy_dataloaders()
            # Train baseline
            UpperCamelCase : List[str] = Accelerator()
            UpperCamelCase : List[str] = accelerator.prepare(
                A_ , A_ , A_ , A_ )
            # Save initial
            UpperCamelCase : Tuple = os.path.join(A_ , "initial" )
            accelerator.save_state(A_ )
            (UpperCamelCase) : Any = model.a.item(), model.b.item()
            UpperCamelCase : Optional[int] = optimizer.state_dict()
            UpperCamelCase : str = train(3 , A_ , A_ , A_ , A_ )
            (UpperCamelCase) : Tuple = model.a.item(), model.b.item()
            UpperCamelCase : str = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            UpperCamelCase : Optional[int] = DummyModel()
            UpperCamelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase : Any = dummy_dataloaders()
            UpperCamelCase : List[str] = Accelerator()
            UpperCamelCase : Optional[Any] = accelerator.prepare(
                A_ , A_ , A_ , A_ )
            accelerator.load_state(A_ )
            (UpperCamelCase) : Optional[Any] = model.a.item(), model.b.item()
            UpperCamelCase : List[str] = optimizer.state_dict()
            self.assertEqual(A_ , A_ )
            self.assertEqual(A_ , A_ )
            self.assertEqual(A_ , A_ )
            UpperCamelCase : Tuple = train(2 , A_ , A_ , A_ , A_ )
            # Save everything
            UpperCamelCase : Optional[int] = os.path.join(A_ , "checkpoint" )
            accelerator.save_state(A_ )
            # Load everything back in and make sure all states work
            accelerator.load_state(A_ )
            test_rands += train(1 , A_ , A_ , A_ , A_ )
            (UpperCamelCase) : Any = model.a.item(), model.b.item()
            UpperCamelCase : Any = optimizer.state_dict()
            self.assertEqual(A_ , A_ )
            self.assertEqual(A_ , A_ )
            self.assertEqual(A_ , A_ )
            self.assertEqual(A_ , A_ )

    # Test: automatic checkpoint naming ("checkpoints/checkpoint_N") round-trips
    # state the same way as explicit paths.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCamelCase : Dict = DummyModel()
            UpperCamelCase : str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase : int = dummy_dataloaders()
            UpperCamelCase : Dict = ProjectConfiguration(automatic_checkpoint_naming=A_ )
            # Train baseline
            UpperCamelCase : int = Accelerator(project_dir=A_ , project_config=A_ )
            UpperCamelCase : str = accelerator.prepare(
                A_ , A_ , A_ , A_ )
            # Save initial
            accelerator.save_state()
            (UpperCamelCase) : Optional[Any] = model.a.item(), model.b.item()
            UpperCamelCase : List[str] = optimizer.state_dict()
            UpperCamelCase : Tuple = train(3 , A_ , A_ , A_ , A_ )
            (UpperCamelCase) : List[str] = model.a.item(), model.b.item()
            UpperCamelCase : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            UpperCamelCase : List[Any] = DummyModel()
            UpperCamelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase : Tuple = dummy_dataloaders()
            UpperCamelCase : Dict = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A_ )
            UpperCamelCase : int = Accelerator(project_dir=A_ , project_config=A_ )
            UpperCamelCase : List[str] = accelerator.prepare(
                A_ , A_ , A_ , A_ )
            accelerator.load_state(os.path.join(A_ , "checkpoints" , "checkpoint_0" ) )
            (UpperCamelCase) : Union[str, Any] = model.a.item(), model.b.item()
            UpperCamelCase : Optional[int] = optimizer.state_dict()
            self.assertEqual(A_ , A_ )
            self.assertEqual(A_ , A_ )
            self.assertEqual(A_ , A_ )
            UpperCamelCase : str = train(2 , A_ , A_ , A_ , A_ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A_ , "checkpoints" , "checkpoint_1" ) )
            test_rands += train(1 , A_ , A_ , A_ , A_ )
            (UpperCamelCase) : List[str] = model.a.item(), model.b.item()
            UpperCamelCase : Any = optimizer.state_dict()
            self.assertEqual(A_ , A_ )
            self.assertEqual(A_ , A_ )
            self.assertEqual(A_ , A_ )
            self.assertEqual(A_ , A_ )

    # Test: register_for_checkpointing rejects objects without state_dict,
    # naming the offending argument indices in the error message.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : str = torch.tensor([1, 2, 3] )
        UpperCamelCase : Any = torch.tensor([2, 3, 4] )
        UpperCamelCase : Dict = DummyModel()
        UpperCamelCase : Union[str, Any] = torch.optim.Adam(net.parameters() )
        UpperCamelCase : List[str] = Accelerator()
        with self.assertRaises(A_ ) as ve:
            accelerator.register_for_checkpointing(A_ , A_ , A_ , A_ )
        UpperCamelCase : List[str] = str(ve.exception )
        self.assertTrue("Item at index 0" in message )
        self.assertTrue("Item at index 1" in message )
        self.assertFalse("Item at index 2" in message )
        self.assertFalse("Item at index 3" in message )

    # Test: LR-scheduler state is saved and restored alongside the model.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCamelCase : Any = DummyModel()
            UpperCamelCase : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase : Dict = torch.optim.lr_scheduler.StepLR(A_ , step_size=1 , gamma=0.99 )
            UpperCamelCase : Optional[int] = dummy_dataloaders()
            UpperCamelCase : Any = ProjectConfiguration(automatic_checkpoint_naming=A_ )
            # Train baseline
            UpperCamelCase : Optional[Any] = Accelerator(project_dir=A_ , project_config=A_ )
            UpperCamelCase : int = accelerator.prepare(
                A_ , A_ , A_ , A_ , A_ )
            # Save initial
            accelerator.save_state()
            UpperCamelCase : Union[str, Any] = scheduler.state_dict()
            train(3 , A_ , A_ , A_ , A_ , A_ )
            self.assertNotEqual(A_ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A_ , "checkpoints" , "checkpoint_0" ) )
            self.assertEqual(A_ , scheduler.state_dict() )

    # Test: with total_limit=2, only the two most recent of 11 checkpoints remain.
    def __UpperCamelCase( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCamelCase : List[str] = DummyModel()
            UpperCamelCase : int = ProjectConfiguration(automatic_checkpoint_naming=A_ , total_limit=2 )
            # Train baseline
            UpperCamelCase : Optional[int] = Accelerator(project_dir=A_ , project_config=A_ )
            UpperCamelCase : Tuple = accelerator.prepare(A_ )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(A_ , "checkpoints" , "checkpoint_0" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A_ , "checkpoints" , "checkpoint_9" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A_ , "checkpoints" , "checkpoint_10" ) ) )

    # Test: re-launch this file under torchrun across all visible GPUs
    # (exercises the __main__ block below in a multi-process setting).
    @require_cuda
    def __UpperCamelCase( self ):
        '''simple docstring'''
        UpperCamelCase : List[Any] = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
    # Multi-process smoke test driven by the @require_cuda test above: save a
    # checkpoint, then verify optimizer state can be re-loaded on CPU, on the
    # accelerator device, and that an invalid map_location raises TypeError.
    #
    # NOTE(review): assignment targets below were rewritten to the throwaway
    # name `__lowerCamelCase`; later lines read the original names (`savedir`,
    # `model`, `optimizer`, `scheduler`, `project_config`, `accelerator`,
    # `param_device`, ...), which are undefined as written. Restore the
    # original bindings to make this block runnable.
    __lowerCamelCase : str = """/tmp/accelerate/state_checkpointing"""
    __lowerCamelCase : int = DummyModel()
    __lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    __lowerCamelCase : Any = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
    __lowerCamelCase : Dict = dummy_dataloaders()
    __lowerCamelCase : Any = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    __lowerCamelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    __lowerCamelCase : Any = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    __lowerCamelCase : List[Any] = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the intial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        __lowerCamelCase : int = group["""params"""][0].device
        break
    assert param_device.type == accelerator.device.type
    __lowerCamelCase : List[str] = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
    for group in optimizer.param_groups:
        __lowerCamelCase : Optional[Any] = group["""params"""][0].device
        break
    assert (
        param_device.type == torch.device("""cpu""").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
    for group in optimizer.param_groups:
        __lowerCamelCase : Optional[int] = group["""params"""][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
        accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 704
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# BUG FIX: the lazy-import structure below is passed to `_LazyModule` as
# `_import_structure`, but every binding was rewritten to the throwaway name
# `__lowerCamelCase`, so `_import_structure` was undefined (NameError at import)
# and the optional-dependency submodule lists were never inserted into it.
# Also restored the canonical `sys.modules[__name__] = _LazyModule(...)`
# assignment, which the obfuscation had reduced to a dead local binding.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is lazy.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38
| 0
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A__ ( __snake_case ):
    """Wav2Vec2 processor wrapping a feature extractor (audio) and a tokenizer (text).

    BUG FIX: the original gave ``__init__`` two parameters both named ``A_``
    (a SyntaxError), bound both class attributes to ``_UpperCAmelCase`` (the
    second clobbers the first, and ProcessorMixin requires the names
    ``feature_extractor_class`` / ``tokenizer_class``), named every method
    ``__UpperCamelCase`` (later defs clobber earlier ones), and assigned
    locals to throwaway names that later lines read via undefined identifiers.
    Restored the canonical names and bindings.
    """

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        # Backward-compat: `as_target_processor` swaps this between the
        # feature extractor and the tokenizer.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Load the processor; fall back to explicit FE + CTC tokenizer loading
        for legacy checkpoints whose config lacks `tokenizer_class`."""
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: " , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )

    def __call__( self , *args , **kwargs ):
        """Dispatch `audio` to the feature extractor and/or `text` to the tokenizer."""
        # For backward compatibility with the `as_target_processor` context.
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            # First positional argument is the audio input.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Joint call: attach tokenized text as labels on the audio features.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad( self , *args , **kwargs ):
        """Pad `input_features` via the feature extractor and/or `labels` via the tokenizer."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("input_features" , None )
        labels = kwargs.pop("labels" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor( self ):
        """Deprecated: temporarily route __call__/pad to the tokenizer for labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 705
|
import logging
import os
import threading
import time
# Optional / platform-specific dependencies fall back to None so the module
# imports everywhere and the implementation is selected at the bottom.
# BUG FIX: the fallback targets (`warnings`, `msvcrt`, `fcntl`,
# `TimeoutError`, `__all__`, `__version__`) were all bound to the throwaway
# name `__lowerCamelCase`, so on platforms where an import fails the names
# checked later (`if msvcrt:` / `if warnings is not None:`) were undefined.
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt  # Windows-only file locking
except ImportError:
    msvcrt = None

try:
    import fcntl  # POSIX-only file locking
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    # Python 2 had no TimeoutError builtin.
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"
# Lazily-created module logger; populated on first call to logger().
_logger = None


def A_ ( ):
    """Return the module logger, creating and caching it on first use.

    BUG FIX: the original bound the global to `__lowerCamelCase` and the
    result to a throwaway local, then returned the still-None `_logger` —
    so every `logger().debug(...)` call raised AttributeError.
    """
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class A__ ( __snake_case ):
    """Raised when a file lock could not be acquired within the timeout."""

    def __init__( self , lock_file ):
        # BUG FIX: the path was assigned to a throwaway local, so __str__'s
        # read of self.lock_file raised AttributeError. Store it on self.
        self.lock_file = lock_file
        return None

    def __str__( self ):
        temp = F"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class A__ :
    """Context-manager proxy returned by acquire(); releases the lock on exit."""

    def __init__( self , lock ):
        # BUG FIX: the lock was assigned to a throwaway local, so __enter__'s
        # read of self.lock raised AttributeError. Store it on self.
        self.lock = lock
        return None

    def __enter__( self ):
        return self.lock

    def __exit__( self , exc_type , exc_value , traceback ):
        self.lock.release()
        return None
class A__ :
    """Base class implementing the reference-counted file-lock protocol.

    Subclasses provide the platform-specific ``_acquire`` / ``_release``.

    BUG FIX: the original had duplicate ``A_`` parameters in ``__init__``
    (SyntaxError), bound every instance attribute to throwaway locals, and
    named all properties/methods ``__UpperCamelCase`` — so ``@timeout.setter``
    referenced a nonexistent ``timeout`` property (NameError at class
    creation) and later definitions clobbered earlier ones. Restored the
    canonical filelock API names.
    """

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        # 255 is the common filesystem limit for a single path component.
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function. Not None only while the object holds the lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # Nested-locking counter: incremented on acquire, lock only released
        # when it drops back to 0.
        self._lock_counter = 0
        return None

    @property
    def lock_file( self ):
        """Path of the lock file."""
        return self._lock_file

    @property
    def timeout( self ):
        """Default timeout (seconds; negative = wait forever)."""
        return self._timeout

    @timeout.setter
    def timeout( self , value ):
        self._timeout = float(value )
        return None

    def _acquire( self ):
        """Platform-specific lock acquisition (implemented by subclasses)."""
        raise NotImplementedError()

    def _release( self ):
        """Platform-specific lock release (implemented by subclasses)."""
        raise NotImplementedError()

    @property
    def is_locked( self ):
        """True while this object holds the OS-level lock."""
        return self._lock_file_fd is not None

    def acquire( self , timeout=None , poll_intervall=0.05 ):
        """Try to acquire the lock, polling until `timeout` elapses.

        Returns a proxy usable as a context manager; raises Timeout on expiry.
        """
        if timeout is None:
            timeout = self.timeout
        # Increment the counter right away; undone on failure below.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )

    def release( self , force=False ):
        """Decrement the nesting counter; release the OS lock at 0 (or if forced)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
        return None

    def __enter__( self ):
        self.acquire()
        return self

    def __exit__( self , exc_type , exc_value , traceback ):
        self.release()
        return None

    def __del__( self ):
        # Best effort: never leave a dangling OS lock behind.
        self.release(force=True )
        return None

    def hash_filename_if_too_long( self , path , max_length ):
        """Shorten an over-long lock filename by splicing in a hash suffix."""
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname , new_filename )
        else:
            return path
class A__ ( __snake_case ):
    """Windows lock via msvcrt.locking on an extended-length (``\\\\?\\``) path.

    BUG FIX: the original had duplicate ``A_`` parameters (SyntaxError) and
    assigned the extended path and the file descriptor to throwaway locals
    instead of ``self._lock_file`` / ``self._lock_file_fd``; both methods were
    named ``__UpperCamelCase`` so the second clobbered the first. Restored the
    canonical names (``_acquire`` / ``_release`` override the base class).
    """

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        # Extended-length path prefix lifts the 260-char Windows path limit.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )

    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                # Non-blocking exclusive lock on one byte.
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None

    def _release( self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class A__ ( __snake_case ):
    """POSIX lock via fcntl.flock.

    BUG FIX: the original had duplicate ``A_`` parameters (SyntaxError),
    discarded the statvfs-derived filename limit into a throwaway local, and
    assigned the descriptor to a throwaway local instead of
    ``self._lock_file_fd``; both methods shared one name. Restored the
    canonical ``_acquire`` / ``_release`` overrides.
    """

    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        # The real per-filesystem filename limit overrides the caller's value.
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )

    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        # Close AFTER unlocking: closing first could release a lock another
        # holder re-acquired in between.
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class A__ ( __snake_case ):
    """Portable soft lock: existence of the lock file IS the lock (O_EXCL).

    BUG FIX: the original assigned the descriptor to throwaway locals instead
    of ``self._lock_file_fd`` and named both methods ``__UpperCamelCase`` so
    the second clobbered the first. Restored the canonical overrides.
    """

    def _acquire( self ):
        # O_EXCL makes creation fail atomically if the file already exists.
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release( self ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Platform selection of the FileLock implementation.
# NOTE(review): in the original library this reads `FileLock = WindowsFileLock
# / UnixFileLock / SoftFileLock`, but here every binding target was rewritten
# to the throwaway name `__lowerCamelCase` AND the class identifiers
# referenced below do not exist in this file (the classes were all renamed to
# `A__`), so this block raises NameError as written. Restoring the original
# class and variable names fixes it.
__lowerCamelCase : Dict = None
if msvcrt:
    __lowerCamelCase : Any = WindowsFileLock
elif fcntl:
    __lowerCamelCase : Any = UnixFileLock
else:
    __lowerCamelCase : int = SoftFileLock
    if warnings is not None:
        warnings.warn("""only soft file lock is available""")
| 38
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config archive map (obfuscated binding names;
# neither is referenced by the config class below).
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class A__ ( __snake_case ):
    """Configuration class for the original OpenAI GPT model.

    BUG FIX: the original declared all sixteen ``__init__`` parameters with
    the duplicate name ``A_`` (a SyntaxError), bound the two class attributes
    to the single name ``_UpperCAmelCase`` (the second clobbers the first —
    PretrainedConfig requires ``model_type`` / ``attribute_map``), and
    assigned every config value to a throwaway local instead of ``self``.
    Restored the canonical names; defaults are unchanged.
    """

    model_type = 'openai-gpt'
    # Map generic config attribute names onto GPT's historical field names.
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 706
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A_ (
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
) -> None:
    """Consolidate a question-encoder and a generator checkpoint into a single
    RAG checkpoint saved under ``dest_dir``.

    The original (obfuscated) signature declared every parameter as the same
    name ``_lowerCAmelCase`` — a duplicate-argument SyntaxError.  Names and
    order are reconstructed from the ``__main__`` call site in this file; the
    ``-> str`` annotation was also corrected to ``-> None`` since nothing is
    returned.

    Args:
        model_type: Either "rag_token" or "rag_sequence".
        generator_name_or_path: Generator model identifier.
        question_encoder_name_or_path: Question-encoder model identifier.
        dest_dir: ``Path`` to the output checkpoint directory.
        config_name_or_path: Base RAG config; defaults to the facebook base
            config matching ``model_type``.
        generator_tokenizer_name_or_path: Defaults to ``generator_name_or_path``.
        question_encoder_tokenizer_name_or_path: Defaults to
            ``question_encoder_name_or_path``.
    """
    if config_name_or_path is None:
        config_name_or_path = (
            "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
        )
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model: graft the two sub-model configs onto the base RAG config.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check: the saved checkpoint must load back.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers alongside the model.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    # CLI entry point: parse model/tokenizer identifiers and consolidate them
    # into a single RAG checkpoint under --dest.  The original bound every
    # result to the throwaway name ``__lowerCamelCase`` and then read the
    # undefined names ``parser``, ``args`` and ``dest_dir``; it also called
    # ``consolidate``, which does not exist — the function in this file is
    # named ``A_``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""",
        choices=["""rag_sequence""", """rag_token"""],
        required=True,
        type=str,
        help="""RAG model type: rag_sequence, rag_token""",
    )
    parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
    parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
    parser.add_argument(
        """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
    )
    parser.add_argument(
        """--generator_tokenizer_name_or_path""",
        type=str,
        help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
    )
    parser.add_argument(
        """--question_encoder_tokenizer_name_or_path""",
        type=str,
        help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
    )
    parser.add_argument(
        """--config_name_or_path""",
        type=str,
        help=(
            """Identifier of the model config to use, if not provided, resolves to a base config for a given"""
            """ ``model_type``"""
        ),
    )
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    A_(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 38
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.