| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81-54k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
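A minimal usage sketch for the config above, assuming the standard `transformers` package (the override values are illustrative):

```python
from transformers import NllbMoeConfig

# Defaults mirror facebook/nllb-moe-54b; override a couple of MoE routing knobs.
config = NllbMoeConfig(num_experts=8, router_dtype="bfloat16")
print(config.num_experts)  # 8
print(config.hidden_size)  # 1024 -- attribute_map resolves hidden_size to d_model
```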
code_codestyle: 721
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
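A small round-trip sketch for the config above; configs serialize to plain dicts, which is how `from_pretrained` restores them (assumes the standard `transformers` package):

```python
from transformers import DPRConfig

config = DPRConfig(projection_dim=128)
restored = DPRConfig.from_dict(config.to_dict())
assert restored.projection_dim == 128
```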
style_context_codestyle: 39 · label: 0
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
code_codestyle: 700
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
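The lazy-import pattern above defers heavy submodule imports until first attribute access. A minimal sketch of the underlying PEP 562 mechanism (an illustration, not the actual `_LazyModule` implementation):

```python
import importlib

_import_structure = {"tokenization_distilbert": ["DistilBertTokenizer"]}

def __getattr__(name):
    # Called only when `name` is not already defined in the module,
    # i.e. on first access; the submodule is imported on demand.
    for submodule, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```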
style_context_codestyle: 39 · label: 0
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        # n == 0 means "unbounded"; negative capacities are rejected.
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        # On a miss, evict the least recently used key once the store is full;
        # on a hit, move the key back to the front.
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        # Prints keys from most to least recently used.
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
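`deque.remove` makes `refer` O(n) on a cache hit. A sketch of an O(1) variant (not part of the original file) built on `OrderedDict`:

```python
from collections import OrderedDict

class LRUCacheOD:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key, last=False)  # hit: move to the front in O(1)
        else:
            if len(self.store) == self.capacity:
                self.store.popitem(last=True)  # evict the least recently used key
            self.store[key] = None
            self.store.move_to_end(key, last=False)
```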
code_codestyle: 701
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
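A minimal end-to-end sketch of the reader/writer pair exercised above (the file name is illustrative; assumes `datasets` is installed):

```python
from datasets import Dataset
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
JsonDatasetWriter(ds, "demo.jsonl", lines=True).write()
reloaded = JsonDatasetReader("demo.jsonl").read()
assert reloaded.column_names == ["col_1", "col_2"]
```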
style_context_codestyle: 39 · label: 0
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
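A quick usage sketch for the processor under test (sizes mirror the tester defaults; assumes `transformers`, `torch`, and `Pillow` are installed):

```python
import numpy as np
from PIL import Image
from transformers import LevitImageProcessor

processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 256, (32, 48, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])
```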
code_codestyle: 702
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 39 · label: 0
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
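The class above matches the shape of the CLIPSeg processor (a ViT image processor paired with a CLIP tokenizer, plus `visual_prompt` support); assuming that, a usage sketch with an illustrative checkpoint:

```python
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```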
code_codestyle: 703
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
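For reference, the sampling pattern these tests exercise can be run standalone; a minimal sketch assuming `diffusers` is installed (the zero model output is a stand-in for a real consistency model):

```python
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)
    model_output = torch.zeros_like(scaled)  # stand-in for a real consistency model
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
```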
style_context_codestyle: 39 · label: 0
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp

FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )


@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
code_codestyle: 704
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    # Scrape the headline counters and panel statistics into a single dict.
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
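The scraper works but is fragile; a hardening sketch (the helper name is illustrative, not part of the original) adds a timeout, a User-Agent, and loud HTTP failures:

```python
import requests
from bs4 import BeautifulSoup

def fetch_soup(url: str = "https://www.worldometers.info/coronavirus") -> BeautifulSoup:
    # Without a timeout a dead server hangs the script; raise_for_status
    # turns 4xx/5xx responses into exceptions instead of parsing error pages.
    response = requests.get(url, timeout=10, headers={"User-Agent": "Mozilla/5.0"})
    response.raise_for_status()
    return BeautifulSoup(response.text, "html.parser")
```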
style_context_codestyle: 39 · label: 0
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
code_codestyle: 705
'''simple docstring'''
from __future__ import annotations
import math
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
if len(lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception("Matrices are not 2x2" )
__lowerCAmelCase = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase ) )
]
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase ) )
]
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
if len(lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("Odd matrices are not supported!" )
__lowerCAmelCase = len(lowerCamelCase )
__lowerCAmelCase = matrix_length // 2
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase )]
__lowerCAmelCase = [
[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )
]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase )]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )]
return top_left, top_right, bot_left, bot_right
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
return len(lowerCamelCase ), len(matrix[0] )
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
    print("\n".join(str(line ) for line in matrix ) )
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
if matrix_dimensions(lowerCamelCase ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase , lowerCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase )
__lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
__lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase )
__lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase )
__lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase )
__lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase )
# construct the new matrix from our 4 quadrants
__lowerCAmelCase = []
for i in range(len(lowerCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
if matrix_dimensions(lowerCamelCase )[1] != matrix_dimensions(lowerCamelCase )[0]:
__lowerCAmelCase = (
"Unable to multiply these matrices, please check the dimensions.\n"
f'''Matrix A: {matrixa}\n'''
f'''Matrix B: {matrixa}'''
)
raise Exception(lowerCamelCase )
__lowerCAmelCase = matrix_dimensions(lowerCamelCase )
__lowerCAmelCase = matrix_dimensions(lowerCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCAmelCase = max(*lowerCamelCase , *lowerCamelCase )
    __lowerCAmelCase = int(math.pow(2 , math.ceil(math.log2(lowerCamelCase ) ) ) )
__lowerCAmelCase = matrixa
__lowerCAmelCase = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__lowerCAmelCase = actual_strassen(lowerCamelCase , lowerCamelCase )
# Removing the additional zeros
for i in range(0 , lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
lowerCAmelCase : Tuple = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowerCAmelCase : Any = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
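    # Sanity check (a sketch): Strassen's output must equal the naive O(n^3)
    # product. With distinct names for the two operands (collapsed to one name
    # above), the check would read:
    #   naive = [
    #       [sum(A[i][k] * B[k][j] for k in range(len(B))) for j in range(len(B[0]))]
    #       for i in range(len(A))
    #   ]
    #   assert strassen(A, B) == naive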
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = 0
if start < end:
__lowerCAmelCase = randint(snake_case__ , snake_case__ )
__lowerCAmelCase = a[end]
__lowerCAmelCase = a[pivot]
__lowerCAmelCase = temp
        __lowerCAmelCase , __lowerCAmelCase = _in_place_partition(snake_case__ , snake_case__ , snake_case__ )
count += _in_place_quick_sort(snake_case__ , snake_case__ , p - 1 )
count += _in_place_quick_sort(snake_case__ , p + 1 , snake_case__ )
return count
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : int ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = randint(snake_case__ , snake_case__ )
__lowerCAmelCase = a[end]
__lowerCAmelCase = a[pivot]
__lowerCAmelCase = temp
__lowerCAmelCase = start - 1
for index in range(snake_case__ , snake_case__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
__lowerCAmelCase = new_pivot_index + 1
__lowerCAmelCase = a[new_pivot_index]
__lowerCAmelCase = a[index]
__lowerCAmelCase = temp
__lowerCAmelCase = a[new_pivot_index + 1]
__lowerCAmelCase = a[end]
__lowerCAmelCase = temp
return new_pivot_index + 1, count
lowerCAmelCase : List[str] = TemporaryFile()
lowerCAmelCase : Union[str, Any] = 1_0_0 # 100 elements are to be sorted
lowerCAmelCase , lowerCAmelCase : int = 0, 1 # mean and standard deviation
lowerCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
lowerCAmelCase : Dict = np.load(outfile)
lowerCAmelCase : List[str] = len(M) - 1
lowerCAmelCase : List[str] = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution '''
    '''is :'''
)
print(z)
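# Sanity check (a sketch): after the in-place sort above, M should be
# non-decreasing; with random pivots the comparison count is O(n log n)
# on average:
#   assert all(M[i] <= M[i + 1] for i in range(len(M) - 1))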
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCAmelCase : Optional[Any] = '''scheduler_config.json'''
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = 1
a : Optional[int] = 2
a : int = 3
a : Union[str, Any] = 4
a : int = 5
a : Optional[int] = 6
a : str = 7
a : List[Any] = 8
a : List[str] = 9
a : List[str] = 1_0
a : int = 1_1
a : Any = 1_2
a : Any = 1_3
a : Tuple = 1_4
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : torch.FloatTensor
class UpperCAmelCase__ :
a : Tuple = SCHEDULER_CONFIG_NAME
a : Union[str, Any] = []
a : str = True
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , )
return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict:
self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase )
@property
def UpperCAmelCase_ ( self ) -> str:
return self._get_compatibles()
@classmethod
def UpperCAmelCase_ ( cls ) -> Tuple:
__lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
__lowerCAmelCase = importlib.import_module(__name__.split("." )[0] )
__lowerCAmelCase = [
getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase )
]
return compatible_classes
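# A minimal usage sketch of this mixin through a concrete diffusers scheduler
# (the model id is illustrative):
#
#   from diffusers import DDIMScheduler
#   scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
#   print(scheduler.compatibles)  # scheduler classes that can reuse this config
#   scheduler.save_pretrained("./my_scheduler")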
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str ):
'''simple docstring'''
    __lowerCAmelCase = cv2.getAffineTransform(__snake_case , __snake_case )
    return cv2.warpAffine(__snake_case , __snake_case , (rows, cols) )
if __name__ == "__main__":
# read original image
    lowerCAmelCase : Optional[Any] = cv2.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
    lowerCAmelCase : Optional[Any] = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
lowerCAmelCase , lowerCAmelCase : Optional[Any] = gray_img.shape
# set different points to rotate image
    lowerCAmelCase : List[str] = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.float32)
    lowerCAmelCase : List[Any] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.float32)
    lowerCAmelCase : Optional[Any] = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.float32)
    lowerCAmelCase : List[str] = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.float32)
# add all rotated images in a list
lowerCAmelCase : Optional[Any] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
lowerCAmelCase : Tuple = plt.figure(1)
lowerCAmelCase : List[str] = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
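    # Illustrative check (a sketch): the affine matrix returned by
    # getAffineTransform maps the three source points exactly onto the three
    # destination points.
    example_src = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
    example_dst = np.array([[1, 1], [2, 1], [1, 2]], np.float32)  # translation by (1, 1)
    print('''Affine matrix for a (1, 1) translation:''')
    print(cv2.getAffineTransform(example_src, example_dst))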
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase : List[Any] = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase = None ) -> Union[str, Any]:
__lowerCAmelCase = (
os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__lowerCAmelCase = Extractor
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
__lowerCAmelCase = os.path.abspath(UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool:
return force_extract or (
not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase ))
)
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str:
__lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase )
if not extractor_format:
return input_path
__lowerCAmelCase = self._get_output_path(UpperCamelCase )
if self._do_extract(UpperCamelCase , UpperCamelCase ):
self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return output_path
class UpperCAmelCase__ ( UpperCamelCase__ ):
@classmethod
@abstractmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
...
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
...
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
a : List[bytes] = []
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
with open(UpperCamelCase , "rb" ) as f:
return f.read(UpperCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if not magic_number:
            __lowerCAmelCase = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
try:
__lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( UpperCamelCase__ ):
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
return tarfile.is_tarfile(UpperCamelCase )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
def resolved(UpperCamelCase ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase ) )
def badpath(UpperCamelCase , UpperCamelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase )
def badlink(UpperCamelCase , UpperCamelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
__lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase )
__lowerCAmelCase = resolved(UpperCamelCase )
for finfo in members:
if badpath(finfo.name , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = tarfile.open(UpperCamelCase )
tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) )
tar_file.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x1F\x8B"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with gzip.open(UpperCamelCase , "rb" ) as gzip_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : List[Any] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return True
try:
            # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase , "rb" ) as fp:
__lowerCAmelCase = _EndRecData(UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be
if len(UpperCamelCase ) == sizeCentralDir:
__lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file:
zip_file.extractall(UpperCamelCase )
zip_file.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with lzma.open(UpperCamelCase ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = rarfile.RarFile(UpperCamelCase )
rf.extractall(UpperCamelCase )
rf.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : int = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
__lowerCAmelCase = zstd.ZstdDecompressor()
with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh:
dctx.copy_stream(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
        with bz2.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
        import py7zr
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
        with py7zr.SevenZipFile(UpperCamelCase , "r" ) as archive:
archive.extractall(UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
        import lz4.frame
        with lz4.frame.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ :
    # Put the zip extractor last, b/c some files can be wrongly detected as zip (when they are actually tar or gzip archives)
a : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCAmelCase_ ( cls ) -> Optional[Any]:
return max(
            len(extractor_magic_number )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase , UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase )
except OSError:
return b""
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/>
__lowerCAmelCase = cls._get_magic_number_max_length()
__lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return extractor_format
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase )
# Prevent parallel extractions
__lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) )
with FileLock(UpperCamelCase ):
shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format
else:
__lowerCAmelCase = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase , UpperCamelCase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase ):
return extractor.extract(UpperCamelCase , UpperCamelCase )
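# A minimal usage sketch (class and method names are obfuscated above; in the
# original `datasets` utility this registry class is `Extractor`):
#
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
#   if fmt is not None:
#       Extractor.extract("archive.tar.gz", "out_dir", extractor_format=fmt)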
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class UpperCAmelCase__ ( _UpperCAmelCase ):
a : List[str] = """openai-gpt"""
a : Tuple = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , UpperCamelCase=4_0478 , UpperCamelCase=512 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=1E-5 , UpperCamelCase=0.02 , UpperCamelCase="cls_index" , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=0.1 , **UpperCamelCase , ) -> List[Any]:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_embd
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = afn
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = summary_type
__lowerCAmelCase = summary_use_proj
__lowerCAmelCase = summary_activation
__lowerCAmelCase = summary_first_dropout
__lowerCAmelCase = summary_proj_to_labels
super().__init__(**lowercase_ )
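# A minimal usage sketch (standard transformers API; the reduced sizes are
# illustrative, and the model is randomly initialized, not pretrained):
#
#   from transformers import OpenAIGPTConfig, OpenAIGPTModel
#   config = OpenAIGPTConfig(n_layer=6, n_head=6, n_embd=384)
#   model = OpenAIGPTModel(config)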
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self ) -> List[str]:
# test for the above condition
self.test()
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = 0
__lowerCAmelCase = False
while not completed:
if counter == 1:
self.reset()
__lowerCAmelCase = self.advance()
if not self.does_advance(UpperCamelCase ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(UpperCamelCase )
counter += 1
if counter > 1_0000:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def UpperCAmelCase_ ( self ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> str:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self , UpperCamelCase ) -> Dict:
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
__lowerCAmelCase = token_ids
__lowerCAmelCase = len(self.token_ids )
__lowerCAmelCase = -1 # the index of the currently fulfilled step
__lowerCAmelCase = False
def UpperCAmelCase_ ( self ) -> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(UpperCamelCase ):
self.fulfilled_idx += 1
__lowerCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
__lowerCAmelCase = True
__lowerCAmelCase = completed
else:
# failed to make progress.
__lowerCAmelCase = True
self.reset()
return stepped, completed, reset
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = False
__lowerCAmelCase = 0
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Optional[Any]:
__lowerCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.fulfilled_idx
__lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase=True ) -> Optional[int]:
        __lowerCAmelCase = max([len(one ) for one in nested_token_ids] )
__lowerCAmelCase = {}
for token_ids in nested_token_ids:
__lowerCAmelCase = root
for tidx, token_id in enumerate(UpperCamelCase ):
if token_id not in level:
__lowerCAmelCase = {}
__lowerCAmelCase = level[token_id]
if no_subsets and self.has_subsets(UpperCamelCase , UpperCamelCase ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
F''' {nested_token_ids}.''' )
__lowerCAmelCase = root
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
__lowerCAmelCase = self.trie
for current_token in current_seq:
__lowerCAmelCase = start[current_token]
__lowerCAmelCase = list(start.keys() )
return next_tokens
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
__lowerCAmelCase = self.next_tokens(UpperCamelCase )
return len(UpperCamelCase ) == 0
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = list(root.values() )
if len(UpperCamelCase ) == 0:
return 1
else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = self.count_leaves(UpperCamelCase )
return len(UpperCamelCase ) != leaf_count
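# A small sketch of the trie's behaviour (hypothetical token ids; the class is
# referenced below as `DisjunctiveTrie`):
#
#   trie = DisjunctiveTrie([[1, 2, 3], [1, 4]])
#   trie.next_tokens([1])      # -> [2, 4]: both alternatives are still open
#   trie.reached_leaf([1, 4])  # -> True: one full alternative was consumed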
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self , UpperCamelCase ) -> List[Any]:
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
__lowerCAmelCase = DisjunctiveTrie(UpperCamelCase )
__lowerCAmelCase = nested_token_ids
__lowerCAmelCase = self.trie.max_height
__lowerCAmelCase = []
__lowerCAmelCase = False
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(UpperCamelCase ):
self.current_seq.append(UpperCamelCase )
__lowerCAmelCase = True
else:
__lowerCAmelCase = True
self.reset()
__lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
__lowerCAmelCase = completed
return stepped, completed, reset
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = False
__lowerCAmelCase = []
def UpperCAmelCase_ ( self ) -> int:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Union[str, Any]:
__lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.current_seq
__lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase ) -> Union[str, Any]:
__lowerCAmelCase = constraints
# max # of steps required to fulfill a given constraint
__lowerCAmelCase = max([c.seqlen for c in constraints] )
__lowerCAmelCase = len(UpperCamelCase )
__lowerCAmelCase = False
self.init_state()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = []
__lowerCAmelCase = None
__lowerCAmelCase = [constraint.copy(stateful=UpperCamelCase ) for constraint in self.constraints]
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__lowerCAmelCase = constraint.advance()
if isinstance(UpperCamelCase , UpperCamelCase ):
token_list.append(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
token_list.extend(UpperCamelCase )
else:
__lowerCAmelCase = self.inprogress_constraint.advance()
if isinstance(UpperCamelCase , UpperCamelCase ):
token_list.append(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
token_list.extend(UpperCamelCase )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__lowerCAmelCase , __lowerCAmelCase = self.add(UpperCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Dict:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
__lowerCAmelCase , __lowerCAmelCase = False, False
if self.completed:
__lowerCAmelCase = True
__lowerCAmelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.inprogress_constraint.update(UpperCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase ) )
__lowerCAmelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__lowerCAmelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
__lowerCAmelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCamelCase ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = pending_constraint.update(UpperCamelCase )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(UpperCamelCase )
__lowerCAmelCase = None
if not complete and stepped:
__lowerCAmelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__lowerCAmelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__lowerCAmelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCAmelCase_ ( self , UpperCamelCase=True ) -> str:
        __lowerCAmelCase = ConstraintListState(self.constraints )  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.
if stateful:
__lowerCAmelCase = [
constraint.copy(stateful=UpperCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCamelCase )
__lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
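# A minimal sketch of constrained generation with these classes (standard
# transformers API; the model id and forced phrase are illustrative):
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
#   tokenizer = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   force = PhrasalConstraint(tokenizer("rainy day", add_special_tokens=False).input_ids)
#   inputs = tokenizer("translate English to German: It is raining.", return_tensors="pt")
#   out = model.generate(**inputs, constraints=[force], num_beams=4)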
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( a__ ):
def __init__( self , UpperCamelCase=None , **UpperCamelCase ) -> Union[str, Any]:
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead." , lowerCAmelCase__ , )
super().__init__(args=lowerCAmelCase__ , **lowerCAmelCase__ )
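# Migration sketch: the shim above only forwards to `Trainer`, so call sites can
# switch imports with no other changes (argument names are illustrative):
#
#   from transformers import Trainer
#   trainer = Trainer(args=training_args, model=model, train_dataset=train_dataset)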
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
    a : List[Any] = KandinskyImg2ImgPipeline
a : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
a : List[Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
a : Any = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a : Union[str, Any] = False
@property
def UpperCAmelCase_ ( self ) -> int:
return 32
@property
def UpperCAmelCase_ ( self ) -> List[str]:
return 32
@property
def UpperCAmelCase_ ( self ) -> Dict:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self ) -> int:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ) -> int:
return 100
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
__lowerCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__lowerCAmelCase = MultilingualCLIP(UpperCamelCase )
__lowerCAmelCase = text_encoder.eval()
return text_encoder
@property
def UpperCAmelCase_ ( self ) -> List[str]:
torch.manual_seed(0 )
__lowerCAmelCase = {
"in_channels": 4,
            # Out channels is double the in channels because the model predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        __lowerCAmelCase = UNet2DConditionModel(**UpperCamelCase )
return model
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
__lowerCAmelCase = DDIMScheduler(**UpperCamelCase )
__lowerCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> Optional[Any]:
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowerCAmelCase = Image.fromarray(np.uint8(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) )
if str(UpperCamelCase ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(UpperCamelCase )
else:
__lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
__lowerCAmelCase = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**UpperCamelCase )
__lowerCAmelCase = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__lowerCAmelCase = "A red cartoon frog, 4k"
        __lowerCAmelCase = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.float16 )
pipe_prior.to(UpperCamelCase )
        __lowerCAmelCase = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.float16 )
__lowerCAmelCase = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__lowerCAmelCase = pipeline(
UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
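# Outside the test harness, the two-stage flow exercised above looks roughly
# like this (a sketch; model ids follow the slow test, and `init_image` is any
# PIL image):
#
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   image_embeds, negative_embeds = prior("A red cartoon frog, 4k").to_tuple()
#   pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#   frog = pipe("A red cartoon frog, 4k", image=init_image, image_embeds=image_embeds,
#               negative_image_embeds=negative_embeds, strength=0.2).images[0]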
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=50 , UpperCamelCase=0.02 , UpperCamelCase=True , UpperCamelCase=None , ) -> Any:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_labels
__lowerCAmelCase = scope
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self ) -> Dict:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self ) -> Tuple:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase = True
__lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase , ) -> List[Any]:
__lowerCAmelCase = BertGenerationEncoder(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
__lowerCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase , ) -> Dict:
__lowerCAmelCase = True
__lowerCAmelCase = BertGenerationEncoder(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__lowerCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , )
__lowerCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase , ) -> Any:
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = BertGenerationDecoder(config=__lowerCAmelCase ).to(__lowerCAmelCase ).eval()
# first forward pass
__lowerCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase , )
__lowerCAmelCase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
__lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )["hidden_states"][0]
__lowerCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )["hidden_states"][0]
# select random slice
__lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , *UpperCamelCase , ) -> Optional[Any]:
__lowerCAmelCase = BertGenerationDecoder(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
a : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a : Optional[Any] = (BertGenerationDecoder,) if is_torch_available() else ()
a : int = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = BertGenerationEncoderTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
__lowerCAmelCase = "bert"
self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__lowerCAmelCase )
def UpperCAmelCase_ ( self ) -> int:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowerCAmelCase = None
self.model_tester.create_and_check_model_as_decoder(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__lowerCAmelCase )
@slow
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__lowerCAmelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
__lowerCAmelCase = model(__lowerCAmelCase )[0]
__lowerCAmelCase = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , __lowerCAmelCase )
__lowerCAmelCase = torch.tensor(
[[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__lowerCAmelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
__lowerCAmelCase = model(__lowerCAmelCase )[0]
__lowerCAmelCase = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape , __lowerCAmelCase )
__lowerCAmelCase = torch.tensor(
[[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
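# A minimal inference sketch for the checkpoint used above (it ships a
# sentencepiece tokenizer; standard transformers API):
#
#   from transformers import BertGenerationEncoder, BertGenerationTokenizer
#   tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   hidden = model(**tokenizer("Hello, my dog is cute", return_tensors="pt")).last_hidden_state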
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
lowerCAmelCase : Any = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
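# Editor's addition: a hedged, self-contained sketch (not part of the original
# script) of the flatten -> pad -> un-flatten round-trip performed by
# `DataCollatorForMultipleChoice` above. The sizes are illustrative only.
def _collator_shape_demo():
    batch_size, num_choices, max_len = 2, 4, 7
    flat = torch.zeros(batch_size * num_choices, max_len)  # shape after tokenizer.pad
    unflat = flat.view(batch_size, num_choices, -1)  # shape the model consumes
    assert unflat.shape == (batch_size, num_choices, max_len)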
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
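# Editor's addition: a hedged example invocation (not part of the original
# script); the model name and hyperparameters below are illustrative only.
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3 \
#     --output_dir /tmp/swag_output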
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation: attach the lower-rank tree to the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: take edges in ascending weight order and keep
        # each one that connects two previously separate components.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
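# Editor's addition: a hedged usage sketch (not in the original file) running
# Kruskal's MST on a small graph. Node labels and weights are illustrative.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)
    g.add_edge(3, 4, 1)
    mst = g.kruskal()
    # Expected MST edges: (1, 2), (2, 3), (3, 4), total weight 4; the heavy
    # (1, 3) edge is rejected because 1 and 3 are already connected.
    print(mst.connections)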
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register an error to raise when an unavailable format type (or one of its aliases) is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for ``format_type`` (or one of its aliases)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
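# Editor's addition: a hedged sketch (not in the original module) of how the
# registry resolves a user-facing name. This mirrors the path that
# `Dataset.set_format("np")` ultimately takes; the exact call site is an
# assumption.
def _formatter_lookup_demo():
    # "np" is registered above as an alias of "numpy"
    assert get_format_type_from_alias("np") == "numpy"
    # resolves the alias, then instantiates NumpyFormatter; if a backend is
    # missing, the registered error (e.g. `_tf_error`) is raised instead
    formatter = get_formatter("np")
    assert isinstance(formatter, NumpyFormatter)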
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        attention_weights = ly_weight["attention"]
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder(weights, model):
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder(weights, model):
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowercase )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        attention_weights = ly_weight["self_attention"]
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
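# Editor's note (hedged, not part of the original script): once converted and
# saved, the pipeline can be reloaded with the standard diffusers API, e.g.
#   pipe = SpectrogramDiffusionPipeline.from_pretrained(args.output_path)
# The note-sequence input format expected by `pipe(...)` follows the
# music_spectrogram_diffusion preprocessing and is not shown here.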
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
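# Editor's note (hedged, not part of the original script): a converted
# checkpoint can then be used like any image classifier; the local path
# below is hypothetical.
#   from transformers import FocalNetForImageClassification
#   model = FocalNetForImageClassification.from_pretrained("path/to/focalnet-tiny")
#   logits = model(**processor(images=image, return_tensors="pt")).logits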
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
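# Editor's addition: a hedged usage sketch, not part of the original module.
# Encoding a sentence pair illustrates the [CLS]/[SEP] layout and the 0/1
# token_type_ids produced by the helpers above; running it requires the
# checkpoint to be downloadable.
def _tokenizer_demo():
    tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
    enc = tok("how are you?", "i am fine.")
    # input_ids: [CLS] first-segment tokens [SEP] second-segment tokens [SEP]
    # token_type_ids: 0s over the first segment (incl. [CLS] and its [SEP]), 1s over the second
    return enc["input_ids"], enc["token_type_ids"]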
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of the numbers in ``nums``.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
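    # Editor's addition (hedged, not in the original file): a few illustrative
    # inputs for the pattern above. Accepted prefixes are 0 / 94 / +94 / 0094,
    # followed by 7x (x in 0-2, 4-8) and seven more digits.
    for number in ("0711234567", "+94771234567", "0094751234567", "0812345678"):
        print(number, "->", is_sri_lankan_phone_number(number))  # the last one is False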
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_5_0, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator and run training
        estimator = self.create_estimator()
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
        EXPECTED_BLIP_MAPPING = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
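# Editor's addition: hedged sanity checks for the sort above (not in the
# original file); inputs are illustrative. Gnome sort is O(n^2) worst case,
# so it only suits small lists.
def _gnome_sort_demo() -> None:
    assert gnome_sort([3, 1, 2]) == [1, 2, 3]
    assert gnome_sort([1]) == [1]
    assert gnome_sort([]) == []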
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True) -> None:
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ] )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True) -> TransformerTemporalModelOutput:
        # 1. Input: fold the frame axis out of the batch so attention runs over time
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output: undo the reshape and add the residual
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
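# A minimal shape sketch for the temporal transformer above, assuming a latent video
# batch of 2 clips x 8 frames with 320 channels at 32x32 resolution (hypothetical
# numbers chosen only for illustration):
#
#   (16, 320, 32, 32)                     # batch_frames = 2 clips * 8 frames on dim 0
#   -> (2, 8, 320, 32, 32)                # frames split out of the batch
#   -> (2, 320, 8, 32, 32) -> GroupNorm   # channels moved next to the batch for norm
#   -> (2*32*32, 8, 320)                  # attention mixes only the 8-frame time axis
#   -> back to (16, 320, 32, 32), plus the residual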
| 39
| 0
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12, )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 718
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int):
    '''simple docstring'''
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
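# A minimal sketch of decoding audio bytes with ffmpeg_read; the file path below is
# hypothetical. ffmpeg converts any container/codec to mono f32le PCM on stdout, so
# np.frombuffer with np.float32 recovers the waveform directly.
#
#   with open("sample.flac", "rb") as f:  # hypothetical input file
#       audio = ffmpeg_read(f.read(), sampling_rate=16_000)
#   # audio: 1-D float32 numpy array at 16 kHz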
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le", ):
    '''simple docstring'''
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le", ):
    '''simple docstring'''
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    '''simple docstring'''
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
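# A worked example of chunk_bytes_iter with made-up sizes: chunk_len=6 and
# stride=(2, 2) gives chunks overlapping by 4 bytes, advancing 2 bytes per chunk,
# with the tail emitted as a final short chunk.
#
#   >>> list(chunk_bytes_iter(iter([b"abcdefgh"]), 6, stride=(2, 2)))
#   [{'raw': b'abcdef', 'stride': (0, 2)},
#    {'raw': b'cdefgh', 'stride': (2, 2)},
#    {'raw': b'efgh', 'stride': (2, 0)}]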
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    '''simple docstring'''
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 39
| 0
|
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    '''simple docstring'''
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
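# A short sketch of NAND's functional completeness (the helper names below are
# illustrative, not part of the original file): NOT, AND and OR all reduce to nand_gate.
#
#   def not_gate(a):    return nand_gate(a, a)
#   def and_gate(a, b): return nand_gate(nand_gate(a, b), nand_gate(a, b))
#   def or_gate(a, b):  return nand_gate(nand_gate(a, a), nand_gate(b, b))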
| 719
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    '''simple docstring'''
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
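# A sketch of the assumed invocation through the transformers CLI entry point; the
# model name and cache path below are illustrative only:
#
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased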
| 39
| 0
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class NameToFromModelFuncMap(dict):
    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    '''simple docstring'''
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(name: str, from_model_func: Callable[[], nn.Module], our_model_func: Callable[[], nn.Module], config: RegNetConfig, save_directory: Path, push_to_hub: bool = True, ):
    '''simple docstring'''
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)
    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True, )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True, )
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    '''simple docstring'''
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 720
|
'''simple docstring'''
def count_divisors(n):
    '''simple docstring'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    '''simple docstring'''
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
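# The divisor count above relies on the prime-factorization identity: if
# n = p1**a1 * p2**a2 * ..., then n has (a1 + 1) * (a2 + 1) * ... divisors.
#
#   >>> count_divisors(28)  # 28 = 2**2 * 7 -> (2 + 1) * (1 + 1)
#   6
#   >>> count_divisors(76576500)  # the known Project Euler 12 answer
#   576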
| 39
| 0
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer) -> None:
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 721
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
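# A minimal usage sketch: in the DPR encoders, a non-zero projection_dim enables an
# extra linear projection on the pooled output (0 leaves it out); the value 128 below
# is an arbitrary illustration.
#
#   config = DPRConfig(projection_dim=128)
#   assert config.hidden_size == 768  # BERT-base style defaults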
| 39
| 0
|
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    '''simple docstring'''
    base_url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(base_url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    '''simple docstring'''
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 700
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_distilbert_fast'''] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_distilbert'''] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_distilbert'''] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_distilbert'''] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
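# A minimal sketch of the lazy-import behavior set up above, assuming the stock
# transformers _LazyModule: only the names in _import_structure are registered at
# import time, and each submodule is materialized on first attribute access.
#
#   import transformers.models.distilbert as distilbert  # cheap: nothing heavy loaded yet
#   distilbert.DistilBertConfig                           # first access triggers the real import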
| 39
| 0
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    '''simple docstring'''
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 10_24,
"hidden_size": 7_68,
"max_length": 5_12,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 10_24,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu"), layer_norm_eps=predefined_args.get("layer_norm_eps", None), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__lowerCAmelCase = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
__lowerCAmelCase = os.path.join(get_home_dir() , "models" )
__lowerCAmelCase = _load_vocab(lowercase__ , lowercase__ , lowercase__ , cls=lowercase__ )
__lowerCAmelCase = nlp.model.BERTModel(
lowercase__ , len(lowercase__ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=lowercase__ , use_token_type_embed=lowercase__ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=lowercase__ , use_decoder=lowercase__ , )
original_bort.load_parameters(lowercase__ , cast_dtype=lowercase__ , ignore_extra=lowercase__ )
__lowerCAmelCase = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.0_2,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(lowercase__ ),
}
__lowerCAmelCase = BertConfig.from_dict(lowercase__ )
__lowerCAmelCase = BertForMaskedLM(lowercase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight")
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight")
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta")
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma")
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias")
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight")
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias")
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight")
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias")
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight")
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias")
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight")
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta")
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma")
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias")
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight")
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias")
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight")
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta")
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma")
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 701
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Dict ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
__lowerCAmelCase = features.copy()
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
if issubclass(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = jsonl_path
elif issubclass(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = [jsonl_path]
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
def _check_json_datasetdict( dataset_dict , expected_features , splits=("train",) ):
    '''simple docstring'''
    assert isinstance(dataset_dict , DatasetDict )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ):
'''simple docstring'''
if split:
__lowerCAmelCase = {split: jsonl_path}
else:
__lowerCAmelCase = "train"
__lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path}
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    '''simple docstring'''
    return json.load(buffer )
def load_json_lines( buffer ):
    '''simple docstring'''
    return [json.loads(line ) for line in buffer]
class UpperCAmelCase__ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase ) == 10
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
with pytest.raises(UpperCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
__lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
__lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write()
with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
assert exported_content == original_content
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
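# Optional components are appended to the import structure only when their dependencies are installed.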
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speecht5'] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
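# TrOCR's model classes require torch, so they are registered only when it is available.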
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
__lowerCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
__lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowerCAmelCase = {"""unk_token""": """[UNK]"""}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase__ ) )
def UpperCAmelCase_ ( self , **UpperCamelCase ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
__lowerCAmelCase = """lower newer"""
__lowerCAmelCase = """lower newer"""
return input_text, output_text
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = """lower newer"""
__lowerCAmelCase = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowerCAmelCase = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tokenizer("Hello" , "World" )
__lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
__lowerCAmelCase = tokenizer.encode("sequence builders" , add_special_tokens=lowercase__ )
__lowerCAmelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase__ )
__lowerCAmelCase = tokenizer.encode(
"sequence builders" , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__lowerCAmelCase = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowerCAmelCase = tokenizer_class.from_pretrained("microsoft/deberta-base" )
__lowerCAmelCase = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
__lowerCAmelCase = tokenizer(lowercase__ , padding=lowercase__ )
__lowerCAmelCase = [tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) for seq in encoding["""input_ids"""]]
# fmt: off
__lowerCAmelCase = {
"""input_ids""": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowerCAmelCase = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , lowercase__ )
for expected, decoded in zip(lowercase__ , lowercase__ ):
self.assertEqual(lowercase__ , lowercase__ )
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( SchedulerCommonTest ):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs )
        return config
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = 10
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = self.scheduler_classes[0](**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps[0]
__lowerCAmelCase = scheduler.timesteps[1]
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self ) -> Any:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = 1
scheduler.set_timesteps(UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps
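        # A fixed seed keeps the stochastic sampler deterministic for the regression values asserted below.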
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCamelCase ):
# 1. scale model input
__lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# 2. predict noise residual
__lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
# 3. predict previous sample x_t-1
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [106, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# 2. predict noise residual
__lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
# 3. predict previous sample x_t-1
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCamelCase , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [39, 30, 12, 1, 0]
__lowerCAmelCase = len(UpperCamelCase )
with self.assertRaises(UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            ValueError , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
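# Strings that mark the end of a generated function body; generation is truncated at the first occurrence.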
EOF_STRINGS = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class TokenizedDataset( IterableDataset ):
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__( self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors="pt" )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria( StoppingCriteria ):
    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__( self , input_ids , scores , **kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( string ):
    '''simple docstring'''
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    '''simple docstring'''
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    '''simple docstring'''
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval" )
    code_eval_metric = load_metric("code_eval" )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval["test"] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""] , predictions=[[""]] )
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation." )
        raise exception
    model, human_eval_loader = accelerator.prepare(model , human_eval_loader )
    generations = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['''test'''][task]['''test''']
            entry_point = f'''check({human_eval['test'][task]['entry_point']})'''
            references.append("\n" + test_func + "\n" + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references , predictions=generations , num_workers=args.num_workers )
        print(f'''Results: {pass_at_k}''' )
        # Save results to json file
        with open(args.output_file , "w" ) as fp:
            json.dump(pass_at_k , fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __lowerCAmelCase ( lowerCamelCase : str = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
__lowerCAmelCase = BeautifulSoup(requests.get(lowerCamelCase ).text , "html.parser" )
    keys = soup.findAll("h1" )
    values = soup.findAll("div" , {"class": "maincounter-number"} )
    keys += soup.findAll("span" , {"class": "panel-title"} )
    values += soup.findAll("div" , {"class": "number-table-main"} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f'{key}\n{value}\n')
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( SchedulerCommonTest ):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
def UpperCAmelCase_ ( self ) -> Tuple:
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase )
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(steps_offset=1 )
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def UpperCAmelCase_ ( self ) -> Tuple:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase , beta_end=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> int:
self.check_over_configs(thresholding=UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase , prediction_type=UpperCamelCase , sample_max_value=UpperCamelCase , )
def UpperCAmelCase_ ( self ) -> List[Any]:
for t in [1, 10, 49]:
self.check_over_forward(time_step=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=UpperCamelCase , num_inference_steps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=UpperCamelCase , eta=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase = 10, 0.0
scheduler.set_timesteps(UpperCamelCase )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = self.dummy_sample_deter + 0.1
__lowerCAmelCase = self.dummy_sample_deter - 0.1
__lowerCAmelCase = samplea.shape[0]
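        # Stack three offset copies of the sample so batch_step_no_noise can denoise a flattened batch in one call.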
__lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
__lowerCAmelCase = torch.arange(UpperCamelCase )[0:3, None].repeat(1 , UpperCamelCase )
__lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__lowerCAmelCase = scheduler.batch_step_no_noise(UpperCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCamelCase )
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
assert abs(result_mean.item() - 0.49_82 ) < 1E-3
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.full_loop()
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.full_loop(prediction_type="v_prediction" )
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1E-2
assert abs(result_mean.item() - 0.06_84 ) < 1E-3
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.full_loop(set_alpha_to_one=UpperCamelCase , beta_start=0.01 )
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
assert abs(result_mean.item() - 0.19_51 ) < 1E-3
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.full_loop(set_alpha_to_one=UpperCamelCase , beta_start=0.01 )
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
assert abs(result_mean.item() - 0.19_41 ) < 1E-3
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication( a : list , b : list ):
    '''simple docstring'''
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception("Matrices are not 2x2" )
__lowerCAmelCase = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
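# Strassen's algorithm replaces 8 recursive block multiplications with 7, giving roughly O(n^2.81) instead of O(n^3).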
def matrix_addition( matrix_a : list , matrix_b : list ):
    '''simple docstring'''
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction( matrix_a : list , matrix_b : list ):
    '''simple docstring'''
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix( a : list ):
    '''simple docstring'''
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception("Odd matrices are not supported!" )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions( matrix : list ):
    '''simple docstring'''
    return len(matrix ), len(matrix[0] )
def print_matrix( matrix : list ):
    '''simple docstring'''
    print("\n".join(str(line ) for line in matrix ) )
def actual_strassen( matrix_a : list , matrix_b : list ):
    '''simple docstring'''
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a , b , c , d = split_matrix(matrix_a )
    e , f , g , h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def strassen( matrix1 : list , matrix2 : list ):
    '''simple docstring'''
    if matrix_dimensions(matrix1 )[1] != matrix_dimensions(matrix2 )[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f'''Matrix A: {matrix1}\n'''
            f'''Matrix B: {matrix2}'''
        )
        raise Exception(msg )
    dimension1 = matrix_dimensions(matrix1 )
    dimension2 = matrix_dimensions(matrix2 )
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1 , *dimension2 )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension1[1] , maxim ):
                new_matrix1[i].append(0 )
        else:
            new_matrix1.append([0] * maxim )
        if i < dimension2[0]:
            for _ in range(dimension2[1] , maxim ):
                new_matrix2[i].append(0 )
        else:
            new_matrix2.append([0] * maxim )
    final_matrix = actual_strassen(new_matrix1 , new_matrix2 )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension2[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs( model ):
    '''simple docstring'''
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
        model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key( name ):
'''simple docstring'''
if "encoder.model" in name:
__lowerCAmelCase = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
__lowerCAmelCase = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
__lowerCAmelCase = "encoder." + name
if "attn.proj" in name:
__lowerCAmelCase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
__lowerCAmelCase = name.replace("attn" , "attention.self" )
if "norm1" in name:
__lowerCAmelCase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowerCAmelCase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
__lowerCAmelCase = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
__lowerCAmelCase = "encoder.layernorm.bias"
return name
def convert_state_dict( orig_state_dict , model ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
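            # Split the fused qkv projection into equal query / key / value slices of size all_head_size.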
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
__lowerCAmelCase = val
return orig_state_dict
def convert_donut_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    '''simple docstring'''
    original_model = DonutModel.from_pretrained(model_name ).eval()
    # load HuggingFace model
    encoder_config , decoder_config = get_configs(original_model )
    encoder = DonutSwinModel(encoder_config )
    decoder = MBartForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
# verify results on scanned document
__lowerCAmelCase = load_dataset("hf-internal-testing/example-documents" )
__lowerCAmelCase = dataset["test"][0]["image"].convert("RGB" )
__lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained(lowercase_ , from_slow=lowercase_ )
__lowerCAmelCase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
__lowerCAmelCase = DonutProcessor(lowercase_ , lowercase_ )
__lowerCAmelCase = processor(lowercase_ , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
__lowerCAmelCase = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
__lowerCAmelCase = "When is the coffee break?"
__lowerCAmelCase = task_prompt.replace("{user_input}" , lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
__lowerCAmelCase = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
__lowerCAmelCase = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
__lowerCAmelCase = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
__lowerCAmelCase = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
__lowerCAmelCase = "hello world"
else:
raise ValueError("Model name not supported" )
__lowerCAmelCase = original_model.decoder.tokenizer(lowercase_ , add_special_tokens=lowercase_ , return_tensors="pt" )[
"input_ids"
]
__lowerCAmelCase = original_model.encoder.model.patch_embed(lowercase_ )
__lowerCAmelCase = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1e-3 )
# verify encoder hidden states
__lowerCAmelCase = original_model.encoder(lowercase_ )
__lowerCAmelCase = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_ , lowercase_ , atol=1e-2 )
# verify decoder hidden states
__lowerCAmelCase = original_model(lowercase_ , lowercase_ , lowercase_ ).logits
__lowerCAmelCase = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_ , lowercase_ , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
lowerCAmelCase : Optional[int] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCAmelCase : Optional[Any] = '''scheduler_config.json'''
class KarrasDiffusionSchedulers( Enum ):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
class UpperCAmelCase__ :
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , )
return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict:
self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase )
@property
def UpperCAmelCase_ ( self ) -> str:
return self._get_compatibles()
@classmethod
def UpperCAmelCase_ ( cls ) -> Tuple:
__lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
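        # Import the top-level package and resolve each compatible scheduler class by name.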
__lowerCAmelCase = importlib.import_module(__name__.split("." )[0] )
__lowerCAmelCase = [
getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase )
]
return compatible_classes
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
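# The tester below wires a tiny ConvNext configuration so model, classification, and backbone checks stay fast.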
class ConvNextModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Tuple:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_snake_case , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = ConvNextModel(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowerCAmelCase = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = ConvNextForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
__lowerCAmelCase = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
__lowerCAmelCase = ConvNextBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowerCAmelCase = model(_snake_case )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowerCAmelCase = None
__lowerCAmelCase = ConvNextBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowerCAmelCase = model(_snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
a : Any = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
a : Optional[Any] = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
a : Dict = True
a : Optional[int] = False
a : str = False
a : Tuple = False
a : List[str] = False
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = ConvNextModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def UpperCAmelCase_ ( self ) -> Tuple:
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def UpperCAmelCase_ ( self ) -> str:
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def UpperCAmelCase_ ( self ) -> Tuple:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_snake_case )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _snake_case )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_snake_case )
def UpperCAmelCase_ ( self ) -> Any:
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
__lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = ConvNextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self ) -> List[Any]:
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(_snake_case )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=_snake_case , return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**_snake_case )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _snake_case )
__lowerCAmelCase = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase , UpperCAmelCase_ ):
a : Dict = (ConvNextBackbone,) if is_torch_available() else ()
a : Tuple = ConvNextConfig
a : Union[str, Any] = False
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = ConvNextModelTester(self )
| 707
|
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase : List[Any] = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase = None ) -> Union[str, Any]:
__lowerCAmelCase = (
os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__lowerCAmelCase = Extractor
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir and derive the extracted path name by hashing the original path
__lowerCAmelCase = os.path.abspath(UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) )
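# Illustrative sketch (not part of the original file): the extracted path is the
# cache dir joined with a hash of the absolute input path, so the same archive
# always maps to the same cache entry. `hash_url_to_filename` is internal to
# `datasets`; assuming a sha256-style digest, the idea is roughly:
#   import hashlib
#   digest = hashlib.sha256(os.path.abspath(input_path).encode("utf-8")).hexdigest()
#   output_path = os.path.join(extract_dir, digest)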
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool:
return force_extract or (
not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase ))
)
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str:
__lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase )
if not extractor_format:
return input_path
__lowerCAmelCase = self._get_output_path(UpperCamelCase )
if self._do_extract(UpperCamelCase , UpperCamelCase ):
self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return output_path
class UpperCAmelCase__ ( UpperCamelCase__ ):
@classmethod
@abstractmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
...
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
...
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
a : List[bytes] = []
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
with open(UpperCamelCase , "rb" ) as f:
return f.read(UpperCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if not magic_number:
__lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
__lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( UpperCamelCase__ ):
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
return tarfile.is_tarfile(UpperCamelCase )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
def resolved(UpperCamelCase ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase ) )
def badpath(UpperCamelCase , UpperCamelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase )
def badlink(UpperCamelCase , UpperCamelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
__lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase )
__lowerCAmelCase = resolved(UpperCamelCase )
for finfo in members:
if badpath(finfo.name , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
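# Why the resolved()/startswith() check above matters (illustrative note on
# stdlib behavior): os.path.join discards the base when the second component is
# absolute, so a malicious member name like "/etc/passwd" would otherwise
# escape the target directory:
#   os.path.join("/extract/base", "/etc/passwd") == "/etc/passwd"
# Resolving the joined path and requiring it to start with the resolved base
# blocks both absolute member names and "../" traversal.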
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = tarfile.open(UpperCamelCase )
tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) )
tar_file.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x1F\x8B"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with gzip.open(UpperCamelCase , "rb" ) as gzip_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : List[Any] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase , "rb" ) as fp:
__lowerCAmelCase = _EndRecData(UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be
if len(UpperCamelCase ) == sizeCentralDir:
__lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file:
zip_file.extractall(UpperCamelCase )
zip_file.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with lzma.open(UpperCamelCase ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = rarfile.RarFile(UpperCamelCase )
rf.extractall(UpperCamelCase )
rf.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : int = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
__lowerCAmelCase = zstd.ZstdDecompressor()
with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh:
dctx.copy_stream(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with bz2.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import py7zr
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with py7zr.SevenZipFile(UpperCamelCase , "r" ) as archive:
archive.extractall(UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lz4.frame
with lz4.frame.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ :
# Put the zip extractor last, because zip detection has false positives (e.g. files that are actually tar or gzip).
a : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCAmelCase_ ( cls ) -> Optional[Any]:
return max(
len(UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase , UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase )
except OSError:
return b""
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/>
__lowerCAmelCase = cls._get_magic_number_max_length()
__lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return extractor_format
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase )
# Prevent parallel extractions
__lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) )
with FileLock(UpperCamelCase ):
shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format
else:
__lowerCAmelCase = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase , UpperCamelCase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase ):
return extractor.extract(UpperCamelCase , UpperCamelCase )
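# A minimal, self-contained sketch (an addition for illustration, not part of the
# original module) of the magic-number sniffing the extractors above rely on:
# read the first bytes of a file and match them against known signatures.
def _sniff_format_sketch(path: str) -> Optional[str]:
    signatures = {
        "gzip": b"\x1f\x8b",
        "zip": b"PK\x03\x04",
        "xz": b"\xfd7zXZ\x00",
        "bz2": b"BZh",
    }
    with open(path, "rb") as f:
        header = f.read(max(len(sig) for sig in signatures.values()))
    for fmt, sig in signatures.items():
        if header.startswith(sig):
            return fmt
    return None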
| 39
| 0
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( _lowerCAmelCase ):
a : str = (UnCLIPScheduler,)
def UpperCAmelCase_ ( self , **UpperCamelCase ) -> Tuple:
__lowerCAmelCase = {
"num_train_timesteps": 1000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**_lowerCAmelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def UpperCAmelCase_ ( self ) -> Any:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def UpperCAmelCase_ ( self ) -> int:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(variance_type="fixed_small_log" )
__lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(variance_type="learned_range" )
__lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
__lowerCAmelCase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_01_00_11 < 1E-5
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
__lowerCAmelCase = scheduler.timesteps
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowerCAmelCase = scheduler.timesteps
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowerCAmelCase = None
else:
__lowerCAmelCase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
pass
| 708
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self ) -> List[str]:
# sanity-check that the constraint subclass is defined consistently
self.test()
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = 0
__lowerCAmelCase = False
while not completed:
if counter == 1:
self.reset()
__lowerCAmelCase = self.advance()
if not self.does_advance(UpperCamelCase ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(UpperCamelCase )
counter += 1
if counter > 10_000:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def UpperCAmelCase_ ( self ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> str:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self , UpperCamelCase ) -> Dict:
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
__lowerCAmelCase = token_ids
__lowerCAmelCase = len(self.token_ids )
__lowerCAmelCase = -1 # the index of the currently fulfilled step
__lowerCAmelCase = False
def UpperCAmelCase_ ( self ) -> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(UpperCamelCase ):
self.fulfilled_idx += 1
__lowerCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
__lowerCAmelCase = True
__lowerCAmelCase = completed
else:
# failed to make progress.
__lowerCAmelCase = True
self.reset()
return stepped, completed, reset
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = False
__lowerCAmelCase = 0
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Optional[Any]:
__lowerCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.fulfilled_idx
__lowerCAmelCase = self.completed
return new_constraint
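# Illustrative walk-through of the phrasal state machine above (assuming the
# class is exposed as `PhrasalConstraint`, as the copy() call suggests):
#   c = PhrasalConstraint([5, 8, 13])
#   c.advance()   # -> 5, the next token needed
#   c.update(5)   # -> (stepped=True, completed=False, reset=False)
#   c.update(9)   # wrong token -> progress is reset: (False, False, True)
#   c.update(5); c.update(8); c.update(13)   # completes the constraint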
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase=True ) -> Optional[int]:
__lowerCAmelCase = max([len(UpperCamelCase ) for one in nested_token_ids] )
__lowerCAmelCase = {}
for token_ids in nested_token_ids:
__lowerCAmelCase = root
for tidx, token_id in enumerate(UpperCamelCase ):
if token_id not in level:
__lowerCAmelCase = {}
__lowerCAmelCase = level[token_id]
if no_subsets and self.has_subsets(UpperCamelCase , UpperCamelCase ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
F''' {nested_token_ids}.''' )
__lowerCAmelCase = root
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
__lowerCAmelCase = self.trie
for current_token in current_seq:
__lowerCAmelCase = start[current_token]
__lowerCAmelCase = list(start.keys() )
return next_tokens
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
__lowerCAmelCase = self.next_tokens(UpperCamelCase )
return len(UpperCamelCase ) == 0
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = list(root.values() )
if len(UpperCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCamelCase ) for nn in next_nodes] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = self.count_leaves(UpperCamelCase )
return len(UpperCamelCase ) != leaf_count
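# Illustrative trie shape (grounded in the construction loop above): for
# nested_token_ids = [[1, 2, 3], [1, 4]] the trie becomes
#   {1: {2: {3: {}}, 4: {}}}
# so next_tokens([1]) -> [2, 4], reached_leaf([1, 4]) -> True, and the trie has
# exactly 2 leaves (one per token sequence), which is what the subset check uses.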
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self , UpperCamelCase ) -> List[Any]:
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCamelCase , UpperCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
__lowerCAmelCase = DisjunctiveTrie(UpperCamelCase )
__lowerCAmelCase = nested_token_ids
__lowerCAmelCase = self.trie.max_height
__lowerCAmelCase = []
__lowerCAmelCase = False
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(UpperCamelCase ):
self.current_seq.append(UpperCamelCase )
__lowerCAmelCase = True
else:
__lowerCAmelCase = True
self.reset()
__lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
__lowerCAmelCase = completed
return stepped, completed, reset
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = False
__lowerCAmelCase = []
def UpperCAmelCase_ ( self ) -> int:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Union[str, Any]:
__lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.current_seq
__lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase ) -> Union[str, Any]:
__lowerCAmelCase = constraints
# max # of steps required to fulfill a given constraint
__lowerCAmelCase = max([c.seqlen for c in constraints] )
__lowerCAmelCase = len(UpperCamelCase )
__lowerCAmelCase = False
self.init_state()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = []
__lowerCAmelCase = None
__lowerCAmelCase = [constraint.copy(stateful=UpperCamelCase ) for constraint in self.constraints]
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__lowerCAmelCase = constraint.advance()
if isinstance(UpperCamelCase , UpperCamelCase ):
token_list.append(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
token_list.extend(UpperCamelCase )
else:
__lowerCAmelCase = self.inprogress_constraint.advance()
if isinstance(UpperCamelCase , UpperCamelCase ):
token_list.append(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
token_list.extend(UpperCamelCase )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__lowerCAmelCase , __lowerCAmelCase = self.add(UpperCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Dict:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
__lowerCAmelCase , __lowerCAmelCase = False, False
if self.completed:
__lowerCAmelCase = True
__lowerCAmelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.inprogress_constraint.update(UpperCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase ) )
__lowerCAmelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__lowerCAmelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
__lowerCAmelCase = True
else:
# Not in the middle of fulfilling a constraint, so does this `token_id` help us step towards any of our
# pending constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCamelCase ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = pending_constraint.update(UpperCamelCase )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(UpperCamelCase )
__lowerCAmelCase = None
if not complete and stepped:
__lowerCAmelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__lowerCAmelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__lowerCAmelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCAmelCase_ ( self , UpperCamelCase=True ) -> str:
__lowerCAmelCase = ConstraintListState(self.constraints ) # we never touch the self.constraints objects
# throughout this process, so the copy starts from the initialization state.
if stateful:
__lowerCAmelCase = [
constraint.copy(stateful=UpperCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCamelCase )
__lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
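# Illustrative usage of the list-state machine above (assuming the classes are
# exposed as `ConstraintListState` and `PhrasalConstraint`, as the copy() calls
# suggest):
#   state = ConstraintListState([PhrasalConstraint([5, 8]), PhrasalConstraint([3])])
#   state.advance()   # -> [5, 3]: first tokens of all pending constraints
#   state.add(5)      # starts fulfilling the first constraint -> (False, True)
#   state.advance()   # -> [8]: only the in-progress constraint may advance
#   state.add(8); state.add(3)   # completes both; state.completed is now True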
| 39
| 0
|
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase : Union[str, Any] = numpy.array([0, 0])
lowerCAmelCase : List[str] = numpy.array([0.5, 0.8660254])
lowerCAmelCase : List[str] = numpy.array([1, 0])
lowerCAmelCase : int = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = initial_vectors
for _ in range(snake_case_ ):
__lowerCAmelCase = iteration_step(snake_case_ )
return vectors
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
__lowerCAmelCase = vectors[i + 1]
new_vectors.append(snake_case_ )
__lowerCAmelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
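# Each pass replaces every segment with four shorter ones, so starting from the
# 4 initial points (3 segments) iteration k yields 3 * 4**k segments, i.e.
# 3 * 4**k + 1 points; the default 5 iterations produce 3072 segments.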
def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = numpy.radians(snake_case_ )
__lowerCAmelCase = numpy.cos(snake_case_ ), numpy.sin(snake_case_ )
__lowerCAmelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(snake_case_ , snake_case_ )
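# Worked example of the rotation above (rotate is the name this function is
# called by in iteration_step): rotating the unit x-vector by 90 degrees gives
# approximately the unit y-vector,
#   rotate(numpy.array([1, 0]), 90)  # -> array([~0.0, 1.0])
# since the 2-D rotation matrix is [[cos t, -sin t], [sin t, cos t]].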
def __lowerCAmelCase ( lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
__lowerCAmelCase = zip(*snake_case_ )
plt.plot(snake_case_ , snake_case_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : Tuple = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 709
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
a : List[Any] = KandinskyImgaImgPipeline
a : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
a : List[Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
a : Any = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a : Union[str, Any] = False
@property
def UpperCAmelCase_ ( self ) -> int:
return 32
@property
def UpperCAmelCase_ ( self ) -> List[str]:
return 32
@property
def UpperCAmelCase_ ( self ) -> Dict:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self ) -> int:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ) -> int:
return 100
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
__lowerCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__lowerCAmelCase = MultilingualCLIP(UpperCamelCase )
__lowerCAmelCase = text_encoder.eval()
return text_encoder
@property
def UpperCAmelCase_ ( self ) -> List[str]:
torch.manual_seed(0 )
__lowerCAmelCase = {
"in_channels": 4,
# out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__lowerCAmelCase = UNetaDConditionModel(**UpperCamelCase )
return model
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
__lowerCAmelCase = DDIMScheduler(**UpperCamelCase )
__lowerCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> Optional[Any]:
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) )
if str(UpperCamelCase ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(UpperCamelCase )
else:
__lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
__lowerCAmelCase = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**UpperCamelCase )
__lowerCAmelCase = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__lowerCAmelCase = "A red cartoon frog, 4k"
__lowerCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase )
__lowerCAmelCase = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__lowerCAmelCase = pipeline(
UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
| 39
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> int:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(UpperCamelCase__ ):
__lowerCAmelCase = AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
__lowerCAmelCase = FlaxAutoModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(UpperCamelCase__ ):
__lowerCAmelCase = AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
__lowerCAmelCase = FlaxAutoModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
__lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ )
__lowerCAmelCase = FlaxBertModel.from_pretrained(UpperCamelCase__ )
__lowerCAmelCase = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCamelCase ):
return model(**UpperCamelCase__ )
eval(**UpperCamelCase__ ).block_until_ready()
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
for model_name in ["roberta-base", "roberta-large"]:
__lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ )
__lowerCAmelCase = FlaxRobertaModel.from_pretrained(UpperCamelCase__ )
__lowerCAmelCase = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCamelCase ):
return model(**UpperCamelCase__ )
eval(**UpperCamelCase__ ).block_until_ready()
def UpperCAmelCase_ ( self ) -> Dict:
with self.assertRaisesRegex(
UpperCamelCase__ , "bert-base is not a local folder and is not a valid model identifier" ):
__lowerCAmelCase = FlaxAutoModel.from_pretrained("bert-base" )
def UpperCAmelCase_ ( self ) -> int:
with self.assertRaisesRegex(
UpperCamelCase__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__lowerCAmelCase = FlaxAutoModel.from_pretrained(UpperCamelCase__ , revision="aaaaaa" )
def UpperCAmelCase_ ( self ) -> Any:
with self.assertRaisesRegex(
UpperCamelCase__ , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
__lowerCAmelCase = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCAmelCase_ ( self ) -> Dict:
with self.assertRaisesRegex(UpperCamelCase__ , "Use `from_pt=True` to load this model" ):
__lowerCAmelCase = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 710
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
lowerCAmelCase : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
a : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a : bool = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class UpperCAmelCase__ :
a : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : bool = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCAmelCase_ ( self ) -> Tuple:
if self.train_file is not None:
__lowerCAmelCase = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__lowerCAmelCase = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase__ :
a : PreTrainedTokenizerBase
a : Union[bool, str, PaddingStrategy] = True
a : Optional[int] = None
a : Optional[int] = None
def __call__( self , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = "label" if "label" in features[0].keys() else "labels"
__lowerCAmelCase = [feature.pop(UpperCamelCase ) for feature in features]
__lowerCAmelCase = len(UpperCamelCase )
__lowerCAmelCase = len(features[0]["input_ids"] )
__lowerCAmelCase = [
[{k: v[i] for k, v in feature.items()} for i in range(UpperCamelCase )] for feature in features
]
__lowerCAmelCase = list(chain(*UpperCamelCase ) )
__lowerCAmelCase = self.tokenizer.pad(
UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
__lowerCAmelCase = {k: v.view(UpperCamelCase , UpperCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
__lowerCAmelCase = torch.tensor(UpperCamelCase , dtype=torch.intaa )
return batch
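# Shape walk-through of __call__ above: a batch of B examples, each holding C
# tokenized candidate sequences, is flattened to B * C sequences, padded
# together to a common length L, then reshaped so every tensor in the batch is
# (B, C, L); the labels are re-attached as a (B,) int64 tensor.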
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__lowerCAmelCase = {}
if data_args.train_file is not None:
__lowerCAmelCase = data_args.train_file
if data_args.validation_file is not None:
__lowerCAmelCase = data_args.validation_file
__lowerCAmelCase = data_args.train_file.split("." )[-1]
__lowerCAmelCase = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__lowerCAmelCase = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__lowerCAmelCase = [f'''ending{i}''' for i in range(4 )]
__lowerCAmelCase = "sent1"
__lowerCAmelCase = "sent2"
if data_args.max_seq_length is None:
__lowerCAmelCase = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
__lowerCAmelCase = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
__lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase : Tuple ):
__lowerCAmelCase = [[context] * 4 for context in examples[context_name]]
__lowerCAmelCase = examples[question_header_name]
__lowerCAmelCase = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
__lowerCAmelCase = list(chain(*lowerCamelCase ) )
__lowerCAmelCase = list(chain(*lowerCamelCase ) )
# Tokenize
__lowerCAmelCase = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
__lowerCAmelCase = raw_datasets["train"]
if data_args.max_train_samples is not None:
__lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_train_samples )
__lowerCAmelCase = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
__lowerCAmelCase = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
__lowerCAmelCase = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
__lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_eval_samples )
__lowerCAmelCase = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
__lowerCAmelCase = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__lowerCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase : Dict ):
__lowerCAmelCase , __lowerCAmelCase = eval_predictions
__lowerCAmelCase = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
__lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
__lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("train" , lowerCamelCase )
trainer.save_metrics("train" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__lowerCAmelCase = trainer.evaluate()
__lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
__lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("eval" , lowerCamelCase )
trainer.save_metrics("eval" , lowerCamelCase )
__lowerCAmelCase = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Tuple ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
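# A minimal, self-contained sketch (hypothetical names, no transformers dependency)
# of the flatten/un-flatten trick in `preprocess_function` above: every example is
# expanded into 4 (context, ending) pairs, the flat batch is tokenized once, and
# the results are regrouped into lists of 4 per example.
from itertools import chain
def regroup_choices(flat_values , num_choices=4 ):
    return [flat_values[i : i + num_choices] for i in range(0 , len(flat_values ) , num_choices )]
contexts = [["A"] * 4, ["B"] * 4]  # two examples, context repeated once per choice
flat_contexts = list(chain(*contexts ) )  # 8 strings fed to the tokenizer at once
assert regroup_choices(flat_contexts ) == contexts  # un-flattening restores the shape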
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCAmelCase : Any = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : Optional[Any] = '''MobileNetV1Config'''
# Base docstring
lowerCAmelCase : int = '''google/mobilenet_v1_1.0_224'''
lowerCAmelCase : Dict = [1, 1_0_2_4, 7, 7]
# Image classification docstring
lowerCAmelCase : List[str] = '''google/mobilenet_v1_1.0_224'''
lowerCAmelCase : Optional[int] = '''tabby, tabby cat'''
lowerCAmelCase : Any = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None ):
'''simple docstring'''
__lowerCAmelCase = {}
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__lowerCAmelCase = model.mobilenet_va
else:
__lowerCAmelCase = model
__lowerCAmelCase = "MobilenetV1/Conv2d_0/"
__lowerCAmelCase = backbone.conv_stem.convolution.weight
__lowerCAmelCase = backbone.conv_stem.normalization.bias
__lowerCAmelCase = backbone.conv_stem.normalization.weight
__lowerCAmelCase = backbone.conv_stem.normalization.running_mean
__lowerCAmelCase = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__lowerCAmelCase = i + 1
__lowerCAmelCase = i * 2
__lowerCAmelCase = backbone.layer[pt_index]
__lowerCAmelCase = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
__lowerCAmelCase = pointer.convolution.weight
__lowerCAmelCase = pointer.normalization.bias
__lowerCAmelCase = pointer.normalization.weight
__lowerCAmelCase = pointer.normalization.running_mean
__lowerCAmelCase = pointer.normalization.running_var
__lowerCAmelCase = backbone.layer[pt_index + 1]
__lowerCAmelCase = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
__lowerCAmelCase = pointer.convolution.weight
__lowerCAmelCase = pointer.normalization.bias
__lowerCAmelCase = pointer.normalization.weight
__lowerCAmelCase = pointer.normalization.running_mean
__lowerCAmelCase = pointer.normalization.running_var
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__lowerCAmelCase = "MobilenetV1/Logits/Conv2d_1c_1x1/"
__lowerCAmelCase = model.classifier.weight
__lowerCAmelCase = model.classifier.bias
return tf_to_pt_map
def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ):
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
__lowerCAmelCase = tf.train.list_variables(lowerCamelCase_ )
__lowerCAmelCase = {}
for name, shape in init_vars:
logger.info(f'''Loading TF weight {name} with shape {shape}''' )
__lowerCAmelCase = tf.train.load_variable(lowerCamelCase_ , lowerCamelCase_ )
__lowerCAmelCase = array
# Build TF to PyTorch weights loading map
__lowerCAmelCase = _build_tf_to_pytorch_map(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(f'''Importing {name}''' )
if name not in tf_weights:
logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
continue
__lowerCAmelCase = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
__lowerCAmelCase = np.transpose(lowerCamelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
__lowerCAmelCase = array.squeeze().transpose()
else:
__lowerCAmelCase = np.transpose(lowerCamelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
__lowerCAmelCase = torch.from_numpy(lowerCamelCase_ )
tf_weights.pop(lowerCamelCase_ , lowerCamelCase_ )
tf_weights.pop(name + "/RMSProp" , lowerCamelCase_ )
tf_weights.pop(name + "/RMSProp_1" , lowerCamelCase_ )
tf_weights.pop(name + "/ExponentialMovingAverage" , lowerCamelCase_ )
logger.info(f'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
def __lowerCAmelCase ( lowerCamelCase : torch.Tensor , lowerCamelCase : nn.Convad ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = features.shape[-2:]
__lowerCAmelCase , __lowerCAmelCase = conv_layer.stride
__lowerCAmelCase , __lowerCAmelCase = conv_layer.kernel_size
if in_height % stride_height == 0:
__lowerCAmelCase = max(kernel_height - stride_height , 0 )
else:
__lowerCAmelCase = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__lowerCAmelCase = max(kernel_width - stride_width , 0 )
else:
__lowerCAmelCase = max(kernel_width - (in_width % stride_width) , 0 )
__lowerCAmelCase = pad_along_width // 2
__lowerCAmelCase = pad_along_width - pad_left
__lowerCAmelCase = pad_along_height // 2
__lowerCAmelCase = pad_along_height - pad_top
__lowerCAmelCase = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowerCamelCase_ , lowerCamelCase_ , "constant" , 0.0 )
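# A small, self-contained sketch of the TensorFlow-style "SAME" padding arithmetic
# implemented above (hypothetical name; plain Python, no torch required).
def same_padding(in_size , stride , kernel ):
    if in_size % stride == 0:
        pad = max(kernel - stride , 0 )
    else:
        pad = max(kernel - (in_size % stride) , 0 )
    return pad // 2, pad - pad // 2  # (before, after): any odd leftover pixel goes after
assert same_padding(8 , 2 , 3 ) == (0, 1)
assert same_padding(7 , 2 , 3 ) == (1, 1)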
class UpperCAmelCase__ ( nn.Module ):
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 , UpperCamelCase = 1 , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = True , ) -> None:
super().__init__()
__lowerCAmelCase = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
__lowerCAmelCase = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__lowerCAmelCase = nn.Convad(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=_lowerCamelCase , stride=_lowerCamelCase , padding=_lowerCamelCase , groups=_lowerCamelCase , bias=_lowerCamelCase , padding_mode="zeros" , )
if use_normalization:
__lowerCAmelCase = nn.BatchNormad(
num_features=_lowerCamelCase , eps=config.layer_norm_eps , momentum=0.99_97 , affine=_lowerCamelCase , track_running_stats=_lowerCamelCase , )
else:
__lowerCAmelCase = None
if use_activation:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowerCAmelCase = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _lowerCamelCase ):
__lowerCAmelCase = ACTaFN[config.hidden_act]
else:
__lowerCAmelCase = config.hidden_act
else:
__lowerCAmelCase = None
def UpperCAmelCase_ ( self , UpperCamelCase ) -> torch.Tensor:
if self.config.tf_padding:
__lowerCAmelCase = apply_tf_padding(_lowerCamelCase , self.convolution )
__lowerCAmelCase = self.convolution(_lowerCamelCase )
if self.normalization is not None:
__lowerCAmelCase = self.normalization(_lowerCamelCase )
if self.activation is not None:
__lowerCAmelCase = self.activation(_lowerCamelCase )
return features
class UpperCAmelCase__ ( lowerCAmelCase__ ):
a : Optional[int] = MobileNetVaConfig
a : Union[str, Any] = load_tf_weights_in_mobilenet_va
a : Optional[Any] = "mobilenet_v1"
a : int = "pixel_values"
a : Union[str, Any] = False
def UpperCAmelCase_ ( self , UpperCamelCase ) -> None:
if isinstance(_lowerCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_lowerCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowerCAmelCase : Optional[Any] = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowerCAmelCase : Optional[int] = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , lowerCAmelCase__ , )
class UpperCAmelCase__ ( lowerCAmelCase__ ):
def __init__( self , UpperCamelCase , UpperCamelCase = True ) -> str:
super().__init__(_lowerCamelCase )
__lowerCAmelCase = config
__lowerCAmelCase = 32
__lowerCAmelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
__lowerCAmelCase = MobileNetVaConvLayer(
_lowerCamelCase , in_channels=config.num_channels , out_channels=_lowerCamelCase , kernel_size=3 , stride=2 , )
__lowerCAmelCase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__lowerCAmelCase = nn.ModuleList()
for i in range(13 ):
__lowerCAmelCase = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__lowerCAmelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=3 , stride=strides[i] , groups=_lowerCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=1 , ) )
__lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Tuple:
raise NotImplementedError
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
__lowerCAmelCase = self.conv_stem(_lowerCamelCase )
__lowerCAmelCase = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__lowerCAmelCase = layer_module(_lowerCamelCase )
if output_hidden_states:
__lowerCAmelCase = all_hidden_states + (hidden_states,)
__lowerCAmelCase = hidden_states
if self.pooler is not None:
__lowerCAmelCase = torch.flatten(self.pooler(_lowerCamelCase ) , start_dim=1 )
else:
__lowerCAmelCase = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCamelCase , pooler_output=_lowerCamelCase , hidden_states=_lowerCamelCase , )
@add_start_docstrings(
"""\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n """ , lowerCAmelCase__ , )
class UpperCAmelCase__ ( lowerCAmelCase__ ):
def __init__( self , UpperCamelCase ) -> None:
super().__init__(_lowerCamelCase )
__lowerCAmelCase = config.num_labels
__lowerCAmelCase = MobileNetVaModel(_lowerCamelCase )
__lowerCAmelCase = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__lowerCAmelCase = nn.Dropout(config.classifier_dropout_prob , inplace=_lowerCamelCase )
__lowerCAmelCase = nn.Linear(_lowerCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = self.mobilenet_va(_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase )
__lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1]
__lowerCAmelCase = self.classifier(self.dropout(_lowerCamelCase ) )
__lowerCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowerCAmelCase = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowerCAmelCase = "single_label_classification"
else:
__lowerCAmelCase = "multi_label_classification"
if self.config.problem_type == "regression":
__lowerCAmelCase = MSELoss()
if self.num_labels == 1:
__lowerCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowerCAmelCase = loss_fct(_lowerCamelCase , _lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
__lowerCAmelCase = CrossEntropyLoss()
__lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowerCAmelCase = BCEWithLogitsLoss()
__lowerCAmelCase = loss_fct(_lowerCamelCase , _lowerCamelCase )
if not return_dict:
__lowerCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_lowerCamelCase , logits=_lowerCamelCase , hidden_states=outputs.hidden_states , )
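# A compact, self-contained sketch (hypothetical name) of the `problem_type`
# dispatch in the loss computation above: when the config leaves it unset, the
# loss is inferred from `num_labels` and the label dtype.
import torch
def infer_problem_type(num_labels , labels ):
    if num_labels == 1:
        return "regression"
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"
assert infer_problem_type(3 , torch.tensor([1, 0, 2] ) ) == "single_label_classification"
assert infer_problem_type(3 , torch.tensor([[1.0, 0.0, 1.0]] ) ) == "multi_label_classification"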
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : Dict[Optional[str], Type[Formatter]] = {}
lowerCAmelCase : Dict[Optional[str], str] = {}
lowerCAmelCase : Dict[Optional[str], Exception] = {}
def __lowerCAmelCase ( lowerCamelCase : type , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None , ):
'''simple docstring'''
__lowerCAmelCase = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
__lowerCAmelCase = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
__lowerCAmelCase = format_type
def __lowerCAmelCase ( lowerCamelCase : Exception , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[List[str]] = None ):
'''simple docstring'''
__lowerCAmelCase = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__lowerCAmelCase = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
lowerCAmelCase : Optional[int] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
lowerCAmelCase : str = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
lowerCAmelCase : Any = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def __lowerCAmelCase ( lowerCamelCase : Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __lowerCAmelCase ( lowerCamelCase : Optional[str] , **lowerCamelCase : Tuple ):
'''simple docstring'''
__lowerCAmelCase = get_format_type_from_alias(lowerCamelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**lowerCamelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
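# A toy, self-contained sketch (hypothetical names) of the alias-resolving registry
# pattern used in this module: aliases map to a canonical key, which maps to a factory.
_TOY_REGISTRY = {}
_TOY_ALIASES = {}
def toy_register(name , factory , aliases=() ):
    _TOY_REGISTRY[name] = factory
    for alias in set(aliases ) | {name}:
        _TOY_ALIASES[alias] = name
def toy_get(name ):
    canonical = _TOY_ALIASES.get(name , name )  # resolve the alias first
    if canonical not in _TOY_REGISTRY:
        raise ValueError(f"unknown format type {name!r}" )
    return _TOY_REGISTRY[canonical]()
toy_register("numpy" , dict , aliases=["np"] )
assert type(toy_get("np" ) ) is dict  # the alias resolves to the canonical factory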
'''simple docstring'''
from __future__ import annotations
import math
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : bool , lowerCamelCase : list[int] , lowerCamelCase : float ):
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__snake_case ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
return min(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
__lowerCAmelCase = math.log(len(__snake_case ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
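# A compact, self-contained restatement (hypothetical name) of the recursion above
# over a perfect binary tree of leaf scores, alternating max and min per level.
def minimax_demo(depth , node , is_max , scores , height ):
    if depth == height:
        return scores[node]
    left = minimax_demo(depth + 1 , node * 2 , not is_max , scores , height )
    right = minimax_demo(depth + 1 , node * 2 + 1 , not is_max , scores , height )
    return max(left , right ) if is_max else min(left , right )
assert minimax_demo(0 , 0 , True , [3, 5, 2, 9] , 2 ) == 3  # max(min(3, 5), min(2, 9))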
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCAmelCase ( lowerCamelCase : Any ):
'''simple docstring'''
__lowerCAmelCase = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
__lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
__lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
__lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
__lowerCAmelCase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowerCAmelCase = [4, 4, 4, 4]
__lowerCAmelCase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
else:
__lowerCAmelCase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowerCAmelCase = 96
elif "small" in model_name:
__lowerCAmelCase = 96
elif "base" in model_name:
__lowerCAmelCase = 1_28
elif "large" in model_name:
__lowerCAmelCase = 1_92
elif "xlarge" in model_name:
__lowerCAmelCase = 2_56
elif "huge" in model_name:
__lowerCAmelCase = 3_52
# set label information
__lowerCAmelCase = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
__lowerCAmelCase = "imagenet-22k-id2label.json"
else:
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = FocalNetConfig(
embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , )
return config
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__lowerCAmelCase = "encoder." + name
if "encoder.layers" in name:
__lowerCAmelCase = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
__lowerCAmelCase = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
__lowerCAmelCase = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCAmelCase = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCAmelCase = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCAmelCase = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
__lowerCAmelCase = "layernorm.weight"
if name == "norm.bias":
__lowerCAmelCase = "layernorm.bias"
if "head" in name:
__lowerCAmelCase = name.replace("head" , "classifier" )
else:
__lowerCAmelCase = "focalnet." + name
return name
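# A quick, self-contained check (hypothetical name) of two of the renaming rules
# above on representative checkpoint keys: patch-embedding keys are remapped, head
# keys become classifier keys, and everything else gains the "focalnet." prefix.
def rename_demo(name ):
    name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "head" in name:
        name = name.replace("head" , "classifier" )
    else:
        name = "focalnet." + name
    return name
assert rename_demo("patch_embed.proj.weight" ) == "focalnet.embeddings.patch_embeddings.projection.weight"
assert rename_demo("head.bias" ) == "classifier.bias"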
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=False ):
'''simple docstring'''
    # fmt: off
    __lowerCAmelCase = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
__lowerCAmelCase = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase )
__lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
__lowerCAmelCase = state_dict.pop(lowerCamelCase )
__lowerCAmelCase = val
__lowerCAmelCase = get_focalnet_config(lowerCamelCase )
__lowerCAmelCase = FocalNetForImageClassification(lowerCamelCase )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase )
# verify conversion
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = BitImageProcessor(
do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , )
__lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
__lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" )
__lowerCAmelCase = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
__lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 )
__lowerCAmelCase = model(**lowerCamelCase )
__lowerCAmelCase = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
__lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
__lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
__lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
__lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
__lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print(f'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(f'''{model_name}''' )
processor.push_to_hub(f'''{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase : Optional[int] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
def __lowerCAmelCase ( lowerCamelCase : Optional[int] = 10 ):
'''simple docstring'''
    if not isinstance(lowerCamelCase , int ) or lowerCamelCase < 0:
        raise ValueError("Invalid input" )
    __lowerCAmelCase = 10**lowerCamelCase
    __lowerCAmelCase = 2_84_33 * (pow(2 , 7_83_04_57 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'{solution(1_0) = }')
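# A minimal, self-contained restatement (hypothetical name) of the idea above:
# three-argument pow performs modular exponentiation, so the huge power is never
# materialized when reading off only its last `n` digits.
def last_digits(n=10 ):
    modulus = 10**n
    return str((2_84_33 * pow(2 , 7_83_04_57 , modulus ) + 1) % modulus )
assert len(last_digits(10 ) ) <= 10  # only the last ten digits survive the modulus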
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase : str = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase : Optional[Any] = {
'''squeezebert/squeezebert-uncased''': 5_1_2,
'''squeezebert/squeezebert-mnli''': 5_1_2,
'''squeezebert/squeezebert-mnli-headless''': 5_1_2,
}
lowerCAmelCase : Tuple = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Dict = VOCAB_FILES_NAMES
a : Any = PRETRAINED_VOCAB_FILES_MAP
a : Dict = PRETRAINED_INIT_CONFIGURATION
a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[Any] = SqueezeBertTokenizer
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> List[Any]:
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCamelCase ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(UpperCamelCase , normalizer_state.pop("type" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**UpperCamelCase )
__lowerCAmelCase = do_lower_case
    def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None ) -> List[int]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
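# A self-contained sketch (hypothetical token ids) of the sentence-pair layout the
# two methods above produce: [CLS] A [SEP] B [SEP] with 0/1 segment ids.
CLS_ID, SEP_ID = 101, 102
def build_pair_demo(ids_a , ids_b=None ):
    out = [CLS_ID] + ids_a + [SEP_ID]
    segments = [0] * len(out )
    if ids_b is not None:
        out += ids_b + [SEP_ID]
        segments += [1] * (len(ids_b ) + 1)
    return out, segments
assert build_pair_demo([7, 8] , [9] ) == ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])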
'''simple docstring'''
def __lowerCAmelCase ( lowerCamelCase : list[list[float]] ):
'''simple docstring'''
__lowerCAmelCase = []
for data in source_data:
for i, el in enumerate(UpperCamelCase__ ):
if len(UpperCamelCase__ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(UpperCamelCase__ ) )
return data_lists
def __lowerCAmelCase ( lowerCamelCase : list[list[float]] , lowerCamelCase : list[int] ):
'''simple docstring'''
__lowerCAmelCase = []
for dlist, weight in zip(UpperCamelCase__ , UpperCamelCase__ ):
__lowerCAmelCase = min(UpperCamelCase__ )
__lowerCAmelCase = max(UpperCamelCase__ )
__lowerCAmelCase = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
__lowerCAmelCase = f'''Invalid weight of {weight:f} provided'''
raise ValueError(UpperCamelCase__ )
score_lists.append(UpperCamelCase__ )
return score_lists
def __lowerCAmelCase ( lowerCamelCase : list[list[float]] ):
'''simple docstring'''
__lowerCAmelCase = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(UpperCamelCase__ ):
__lowerCAmelCase = final_scores[j] + ele
return final_scores
def __lowerCAmelCase ( lowerCamelCase : list[list[float]] , lowerCamelCase : list[int] ):
'''simple docstring'''
__lowerCAmelCase = get_data(UpperCamelCase__ )
__lowerCAmelCase = calculate_each_score(UpperCamelCase__ , UpperCamelCase__ )
__lowerCAmelCase = generate_final_scores(UpperCamelCase__ )
# append scores to source data
for i, ele in enumerate(UpperCamelCase__ ):
source_data[i].append(UpperCamelCase__ )
return source_data
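# A minimal, self-contained sketch of the per-column min-max scoring above
# (hypothetical name; the zero-division guard differs slightly): weight 1 maps a
# value to its normalized position in the column, weight 0 to one minus that, so
# lower-is-better and higher-is-better columns become comparable before summing.
def column_score(values , weight ):
    lo, hi = min(values ), max(values )
    span = (hi - lo) or 1  # crude guard for the constant-column case
    norm = [(v - lo) / span for v in values]
    return norm if weight == 1 else [1 - x for x in norm]
assert column_score([20, 25, 30] , 1 ) == [0.0, 0.5, 1.0]  # bigger is better
assert column_score([20, 25, 30] , 0 ) == [1.0, 0.5, 0.0]  # smaller is better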
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(lowerCamelCase ) / len(lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCAmelCase__ ( __UpperCAmelCase ):
def __lt__( self , UpperCamelCase ) -> List[str]:
return self[-1] < other[-1]
def __eq__( self , UpperCamelCase ) -> Union[str, Any]:
return self[-1] == other[-1]
def __lowerCAmelCase ( lowerCamelCase : Dict ):
'''simple docstring'''
__lowerCAmelCase = []
# sort into stacks
for element in collection:
__lowerCAmelCase = Stack([element] )
__lowerCAmelCase = bisect_left(_UpperCAmelCase , _UpperCAmelCase )
if i != len(_UpperCAmelCase ):
stacks[i].append(_UpperCAmelCase )
else:
stacks.append(_UpperCAmelCase )
# use a heap-based merge to merge stack efficiently
__lowerCAmelCase = merge(*(reversed(_UpperCAmelCase ) for stack in stacks) )
return collection
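# A self-contained restatement (hypothetical names) of the same idea with plain
# lists: each element lands on the leftmost stack whose top is >= it, so every
# stack is non-increasing and reads ascending when reversed, ready for the k-way
# heap merge.
from bisect import bisect_left
from heapq import merge
def patience_demo(items ):
    stacks, tops = [], []
    for x in items:
        i = bisect_left(tops , x )
        if i == len(stacks ):
            stacks.append([x] )
            tops.append(x )
        else:
            stacks[i].append(x )
            tops[i] = x
    return list(merge(*(reversed(s ) for s in stacks) ) )
assert patience_demo([5, 1, 4, 2, 3] ) == [1, 2, 3, 4, 5]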
if __name__ == "__main__":
lowerCAmelCase : str = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase : Union[str, Any] = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
'''simple docstring'''
import re
def __lowerCAmelCase ( lowerCamelCase : str ):
'''simple docstring'''
__lowerCAmelCase = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(lowerCamelCase , lowerCamelCase ) )
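# Two quick, self-contained checks of the pattern above: a 0094-prefixed mobile
# number matches, while a landline-style (non-7x) prefix does not.
_SL_MOBILE = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
assert _SL_MOBILE.search("0094702343221" ) is not None
assert _SL_MOBILE.search("0112343221" ) is None  # 011 is not a mobile prefix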
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
'''simple docstring'''
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
while second != 0:
__lowerCAmelCase = first & second
first ^= second
__lowerCAmelCase = c << 1
return first
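# A short, self-contained check of the carry loop above (hypothetical name).
# Note: with Python's unbounded ints the loop terminates for non-negative inputs;
# a negative second operand would shift the carry left forever.
def add_demo(first , second ):
    while second != 0:
        carry = first & second  # bit positions that generate a carry
        first ^= second  # per-bit sum with carries dropped
        second = carry << 1  # re-inject the carries one position to the left
    return first
assert add_demo(13 , 29 ) == 42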
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : int = int(input('''Enter the first number: ''').strip())
lowerCAmelCase : Optional[int] = int(input('''Enter the second number: ''').strip())
print(f'{add(first, second) = }')
'''simple docstring'''
import os
import sys
import unittest
lowerCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = {"BertModelTest": "BertModelTester"}
__lowerCAmelCase = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
__lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
__lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
__lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
__lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase ) -> None:
        __lowerCAmelCase = len(array )
        __lowerCAmelCase = [0] * len_array
        if len_array > 0:
            __lowerCAmelCase = array[0]
        for i in range(1 , len_array ):
            __lowerCAmelCase = self.prefix_sum[i - 1] + array[i]
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> bool:
__lowerCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
            sums.add(sum_item )
return False
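# A quick, self-contained usage sketch (hypothetical name) of the structure above:
# O(1) range sums after O(n) preprocessing.
class PrefixSumDemo:
    def __init__(self , array ):
        self.prefix = []
        running = 0
        for value in array:
            running += value
            self.prefix.append(running )
    def range_sum(self , start , end ):
        return self.prefix[end] - (self.prefix[start - 1] if start else 0)
assert PrefixSumDemo([1, 2, 3, 4] ).range_sum(1 , 3 ) == 9  # 2 + 3 + 4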
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : torch.FloatTensor
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
def __init__( self , UpperCamelCase = 16 , UpperCamelCase = 88 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = 32 , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = "geglu" , UpperCamelCase = True , UpperCamelCase = True , ) -> List[str]:
super().__init__()
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = attention_head_dim
__lowerCAmelCase = num_attention_heads * attention_head_dim
__lowerCAmelCase = in_channels
__lowerCAmelCase = torch.nn.GroupNorm(num_groups=UpperCamelCase , num_channels=UpperCamelCase , eps=1E-6 , affine=UpperCamelCase )
__lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase )
# 3. Define transformers blocks
__lowerCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , cross_attention_dim=UpperCamelCase , activation_fn=UpperCamelCase , attention_bias=UpperCamelCase , double_self_attention=UpperCamelCase , norm_elementwise_affine=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
__lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=1 , UpperCamelCase=None , UpperCamelCase = True , ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = hidden_states.shape
__lowerCAmelCase = batch_frames // num_frames
__lowerCAmelCase = hidden_states
__lowerCAmelCase = hidden_states[None, :].reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__lowerCAmelCase = self.norm(UpperCamelCase )
__lowerCAmelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = self.proj_in(UpperCamelCase )
# 2. Blocks
for block in self.transformer_blocks:
__lowerCAmelCase = block(
UpperCamelCase , encoder_hidden_states=UpperCamelCase , timestep=UpperCamelCase , cross_attention_kwargs=UpperCamelCase , class_labels=UpperCamelCase , )
# 3. Output
__lowerCAmelCase = self.proj_out(UpperCamelCase )
__lowerCAmelCase = (
hidden_states[None, None, :]
.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__lowerCAmelCase = hidden_states.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCamelCase )
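# A standalone sketch (hypothetical name) of the reshape bookkeeping in `forward`
# above: spatial positions are folded into the batch so the transformer blocks
# attend over the frame axis only.
import torch
def to_temporal_tokens(x , num_frames ):
    batch_frames, channels, height, width = x.shape
    batch = batch_frames // num_frames
    x = x.reshape(batch , num_frames , channels , height , width )
    x = x.permute(0 , 3 , 4 , 1 , 2 )  # (batch, height, width, frames, channels)
    return x.reshape(batch * height * width , num_frames , channels )
assert to_temporal_tokens(torch.zeros(8 , 16 , 4 , 4 ) , num_frames=2 ).shape == (64, 2, 16)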
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> List[str]:
debug_launcher(test_script.main )
def UpperCAmelCase_ ( self ) -> List[str]:
debug_launcher(test_ops.main )
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCAmelCase ( lowerCamelCase : bytes , lowerCamelCase : int ):
'''simple docstring'''
__lowerCAmelCase = f'''{sampling_rate}'''
__lowerCAmelCase = "1"
__lowerCAmelCase = "f32le"
__lowerCAmelCase = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__lowerCAmelCase = ffmpeg_process.communicate(lowerCamelCase )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
__lowerCAmelCase = output_stream[0]
__lowerCAmelCase = np.frombuffer(lowerCamelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ):
'''simple docstring'''
__lowerCAmelCase = f'''{sampling_rate}'''
__lowerCAmelCase = "1"
if format_for_conversion == "s16le":
__lowerCAmelCase = 2
elif format_for_conversion == "f32le":
__lowerCAmelCase = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
__lowerCAmelCase = platform.system()
if system == "Linux":
__lowerCAmelCase = "alsa"
__lowerCAmelCase = "default"
elif system == "Darwin":
__lowerCAmelCase = "avfoundation"
__lowerCAmelCase = ":0"
elif system == "Windows":
__lowerCAmelCase = "dshow"
__lowerCAmelCase = "default"
__lowerCAmelCase = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
__lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase )
for item in iterator:
yield item
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[Tuple[float, float], float]] = None , lowerCamelCase : str = "f32le" , ):
'''simple docstring'''
if stream_chunk_s is not None:
__lowerCAmelCase = stream_chunk_s
else:
__lowerCAmelCase = chunk_length_s
__lowerCAmelCase = ffmpeg_microphone(lowerCamelCase , lowerCamelCase , format_for_conversion=lowerCamelCase )
if format_for_conversion == "s16le":
__lowerCAmelCase = np.intaa
__lowerCAmelCase = 2
elif format_for_conversion == "f32le":
__lowerCAmelCase = np.floataa
__lowerCAmelCase = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
__lowerCAmelCase = chunk_length_s / 6
__lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCamelCase , (int, float) ):
__lowerCAmelCase = [stride_length_s, stride_length_s]
__lowerCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__lowerCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__lowerCAmelCase = datetime.datetime.now()
__lowerCAmelCase = datetime.timedelta(seconds=lowerCamelCase )
for item in chunk_bytes_iter(lowerCamelCase , lowerCamelCase , stride=(stride_left, stride_right) , stream=lowerCamelCase ):
# Put everything back in numpy scale
__lowerCAmelCase = np.frombuffer(item["raw"] , dtype=lowerCamelCase )
__lowerCAmelCase = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
__lowerCAmelCase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Tuple[int, int] , lowerCamelCase : bool = False ):
'''simple docstring'''
__lowerCAmelCase = B""
__lowerCAmelCase , __lowerCAmelCase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
__lowerCAmelCase = 0
for raw in iterator:
acc += raw
if stream and len(lowerCamelCase ) < chunk_len:
__lowerCAmelCase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
__lowerCAmelCase = (_stride_left, stride_right)
__lowerCAmelCase = {"raw": acc[:chunk_len], "stride": stride}
if stream:
__lowerCAmelCase = False
yield item
__lowerCAmelCase = stride_left
__lowerCAmelCase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCamelCase ) > stride_left:
__lowerCAmelCase = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
__lowerCAmelCase = False
yield item
def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int ):
'''simple docstring'''
__lowerCAmelCase = 2**24 # 16Mo
try:
with subprocess.Popen(lowerCamelCase , stdout=subprocess.PIPE , bufsize=lowerCamelCase ) as ffmpeg_process:
while True:
__lowerCAmelCase = ffmpeg_process.stdout.read(lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
'''simple docstring'''
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCAmelCase : Dict = input('''Enter image url: ''').strip()
print(f'Downloading image from {url} ...')
lowerCAmelCase : Any = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
    lowerCAmelCase : Dict = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowerCAmelCase : int = requests.get(image_url).content
lowerCAmelCase : Dict = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
| 719
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __lowerCAmelCase ( lowerCamelCase : List[str] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCAmelCase__ ( UpperCamelCase__ ):
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase ) -> Tuple:
__lowerCAmelCase = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=UpperCamelCase , default=UpperCamelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=UpperCamelCase , help="Name of the model to download" )
download_parser.set_defaults(func=UpperCamelCase )
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
__lowerCAmelCase = model
__lowerCAmelCase = cache
__lowerCAmelCase = force
__lowerCAmelCase = trust_remote_code
def UpperCAmelCase_ ( self ) -> Any:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
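# Hypothetical programmatic equivalent of `transformers-cli download`; the
# factory and method names (`download_command_factory`, `run`) are assumed from
# upstream transformers, since the identifiers above are mangled.
from argparse import Namespace

def _demo_download_command():
    args = Namespace(
        model="bert-base-uncased",
        cache_dir="/tmp/hf-cache",  # hypothetical path
        force=False,
        trust_remote_code=False,
    )
    download_command_factory(args).run()  # warms the cache for model + tokenizer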
| 39
| 0
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : int ):
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowerCAmelCase = np.full((len(lowerCAmelCase__ ), sequence_length, 2) , lowerCAmelCase__ )
else:
__lowerCAmelCase = np.full((len(lowerCAmelCase__ ), sequence_length) , lowerCAmelCase__ )
for i, tensor in enumerate(lowerCAmelCase__ ):
if padding_side == "right":
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowerCAmelCase = tensor[:sequence_length]
else:
__lowerCAmelCase = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowerCAmelCase = tensor[:sequence_length]
else:
__lowerCAmelCase = tensor[:sequence_length]
return out_tensor.tolist()
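# A minimal, readable sketch of the padding behaviour the helper above targets
# (the mangled assignment targets were presumably slices of `out_tensor`);
# the names below are illustrative, not part of the original file.
def _padding_tensor_sketch(sequences, padding_value, padding_side, sequence_length):
    out = [[padding_value] * sequence_length for _ in sequences]
    for i, seq in enumerate(sequences):
        seq = list(seq)[:sequence_length]  # truncate overlong sequences
        if padding_side == "right":
            out[i][: len(seq)] = seq
        else:
            out[i][sequence_length - len(seq) :] = seq
    return out

assert _padding_tensor_sketch([[1, 2], [3]], -100, "right", 4) == [
    [1, 2, -100, -100],
    [3, -100, -100, -100],
]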
def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__lowerCAmelCase = ord(lowerCAmelCase__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
__lowerCAmelCase = unicodedata.category(lowerCAmelCase__ )
if cat.startswith("P" ):
return True
return False
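# Readable restatement of the punctuation predicate above, plus a quick sanity
# check: ASCII punctuation is caught by the four code-point ranges, anything
# else (e.g. the ideographic full stop) by the Unicode "P*" category fallback.
def _is_punctuation_sketch(ch: str) -> bool:
    cp = ord(ch)
    if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):
        return True
    return unicodedata.category(ch).startswith("P")

assert _is_punctuation_sketch("!") and _is_punctuation_sketch("。")
assert not _is_punctuation_sketch("a")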
@dataclass
class UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE ):
a : PreTrainedTokenizerBase
a : Union[bool, str, PaddingStrategy] = True
a : Optional[int] = None
a : Optional[int] = None
a : int = -1_0_0
a : str = "pt"
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Tuple:
import torch
__lowerCAmelCase = "label" if "label" in features[0].keys() else "labels"
__lowerCAmelCase = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__lowerCAmelCase = self.tokenizer.pad(
_a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
__lowerCAmelCase = torch.tensor(batch["entity_ids"] ).shape[1]
__lowerCAmelCase = self.tokenizer.padding_side
if padding_side == "right":
__lowerCAmelCase = [
list(_a ) + [self.label_pad_token_id] * (sequence_length - len(_a )) for label in labels
]
else:
__lowerCAmelCase = [
[self.label_pad_token_id] * (sequence_length - len(_a )) + list(_a ) for label in labels
]
__lowerCAmelCase = [feature["ner_tags"] for feature in features]
__lowerCAmelCase = padding_tensor(_a , -1 , _a , _a )
__lowerCAmelCase = [feature["original_entity_spans"] for feature in features]
__lowerCAmelCase = padding_tensor(_a , (-1, -1) , _a , _a )
__lowerCAmelCase = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
return batch
| 720
|
'''simple docstring'''
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = 1
__lowerCAmelCase = 2
while i * i <= n:
__lowerCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = 1
__lowerCAmelCase = 1
while True:
i += 1
t_num += i
if count_divisors(lowerCamelCase ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution())
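# Worked example of the divisor-counting step above: 28 = 2**2 * 7, so the
# divisor count is (2 + 1) * (1 + 1) = 6, i.e. {1, 2, 4, 7, 14, 28}. The loop
# builds exactly this product of (multiplicity + 1) factors, treating any
# leftover prime (n > 1 after the loop) as having multiplicity 1. A
# self-contained restatement with readable names:
def _count_divisors_sketch(n: int) -> int:
    count, p = 1, 2
    while p * p <= n:
        multiplicity = 0
        while n % p == 0:
            n //= p
            multiplicity += 1
        count *= multiplicity + 1
        p += 1
    if n > 1:  # remaining prime factor with multiplicity 1
        count *= 2
    return count

assert _count_divisors_sketch(28) == 6
assert _count_divisors_sketch(76576500) == 576  # the Project Euler 12 answer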
| 39
| 0
|
'''simple docstring'''
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list , lowerCamelCase : int ):
'''simple docstring'''
__lowerCAmelCase = len(_lowerCamelCase )
__lowerCAmelCase = [[0] * n for i in range(_lowerCamelCase )]
for i in range(_lowerCamelCase ):
__lowerCAmelCase = y_points[i]
for i in range(2 , _lowerCamelCase ):
for j in range(_lowerCamelCase , _lowerCamelCase ):
__lowerCAmelCase = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
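# Usage sketch for the scheme above (Neville's iterated interpolation), restated
# 0-indexed and with readable names since the original identifiers are mangled.
# For points on y = x**2 the cubic interpolant is exact, so querying x = 5
# returns 25.
def _neville_sketch(x_points, y_points, x0):
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = y_points[i]
    for i in range(1, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return q[n - 1][n - 1]

assert _neville_sketch([1, 2, 3, 4], [1, 4, 9, 16], 5) == 25.0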
| 721
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Optional[Any] = """dpr"""
def __init__( self , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1E-12 , UpperCamelCase=0 , UpperCamelCase="absolute" , UpperCamelCase = 0 , **UpperCamelCase , ) -> Tuple:
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = projection_dim
__lowerCAmelCase = position_embedding_type
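# Usage sketch via the public transformers API this config backs (class names
# assumed from upstream, since the ones above are mangled):
#
#   from transformers import DPRConfig, DPRContextEncoder
#   config = DPRConfig(projection_dim=128)  # 0, the default, disables the projection head
#   model = DPRContextEncoder(config)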
| 39
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
lowerCAmelCase : Optional[Any] = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
lowerCAmelCase : int = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class UpperCAmelCase__ ( snake_case_ ):
a : Union[str, Any] = """whisper"""
a : int = ["""past_key_values"""]
a : Optional[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , UpperCamelCase=5_1865 , UpperCamelCase=80 , UpperCamelCase=6 , UpperCamelCase=4 , UpperCamelCase=6 , UpperCamelCase=4 , UpperCamelCase=1536 , UpperCamelCase=1536 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=5_0257 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase="gelu" , UpperCamelCase=256 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.02 , UpperCamelCase=False , UpperCamelCase=1500 , UpperCamelCase=448 , UpperCamelCase=5_0256 , UpperCamelCase=5_0256 , UpperCamelCase=5_0256 , UpperCamelCase=None , UpperCamelCase=[220, 5_0256] , UpperCamelCase=False , UpperCamelCase=256 , UpperCamelCase=False , UpperCamelCase=0.05 , UpperCamelCase=10 , UpperCamelCase=2 , UpperCamelCase=0.0 , UpperCamelCase=10 , UpperCamelCase=0 , UpperCamelCase=7 , **UpperCamelCase , ) -> int:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = num_mel_bins
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase = max_source_positions
__lowerCAmelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase = classifier_proj_size
__lowerCAmelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase = apply_spec_augment
__lowerCAmelCase = mask_time_prob
__lowerCAmelCase = mask_time_length
__lowerCAmelCase = mask_time_min_masks
__lowerCAmelCase = mask_feature_prob
__lowerCAmelCase = mask_feature_length
__lowerCAmelCase = mask_feature_min_masks
__lowerCAmelCase = median_filter_width
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , suppress_tokens=UpperCamelCase , begin_suppress_tokens=UpperCamelCase , **UpperCamelCase , )
class UpperCAmelCase__ ( snake_case_ ):
@property
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
__lowerCAmelCase = {0: "batch"}
else:
__lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase , direction="inputs" )
return common_inputs
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = -1 , UpperCamelCase = -1 , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = 2_2050 , UpperCamelCase = 5.0 , UpperCamelCase = 220 , ) -> List[Any]:
__lowerCAmelCase = OrderedDict()
__lowerCAmelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=UpperCamelCase , framework=UpperCamelCase , sampling_rate=UpperCamelCase , time_duration=UpperCamelCase , frequency=UpperCamelCase , )
__lowerCAmelCase = encoder_inputs["input_features"].shape[2]
__lowerCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCAmelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = encoder_inputs.pop("input_features" )
__lowerCAmelCase = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
__lowerCAmelCase = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def UpperCAmelCase_ ( self ) -> List[str]:
return 1E-3
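# Usage sketch (values mirror the defaults in __init__ above; the class name is
# assumed from upstream transformers):
#
#   from transformers import WhisperConfig
#   config = WhisperConfig(begin_suppress_tokens=[220, 50256])  # typically space and EOS
#   assert config.max_source_positions == 1500  # 30 s of log-mel frames after the stride-2 conv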
| 700
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
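# Behavioural note on the _LazyModule pattern above (a sketch, with the upstream
# module path assumed): heavy backends are imported only on first attribute
# access, so importing the package stays cheap even when torch is absent:
#
#   import transformers.models.distilbert as distilbert  # no backend import yet
#   distilbert.DistilBertConfig                           # triggers the real import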
| 39
| 0
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowerCAmelCase : Optional[Any] = TypeVar('''KT''')
lowerCAmelCase : str = TypeVar('''VT''')
class UpperCAmelCase__ ( Generic[KT, VT] ):
def __init__( self , UpperCamelCase = "root" , UpperCamelCase = None ) -> str:
__lowerCAmelCase = key
__lowerCAmelCase = value
__lowerCAmelCase = []
def __repr__( self ) -> Tuple:
return F'''Node({self.key}: {self.value})'''
@property
def UpperCAmelCase_ ( self ) -> str:
return len(self.forward )
class UpperCAmelCase__ ( Generic[KT, VT] ):
def __init__( self , UpperCamelCase = 0.5 , UpperCamelCase = 16 ) -> int:
__lowerCAmelCase = Node[KT, VT]()
__lowerCAmelCase = 0
__lowerCAmelCase = p
__lowerCAmelCase = max_level
def __str__( self ) -> Dict:
__lowerCAmelCase = list(self )
if len(lowercase__ ) == 0:
return F'''SkipList(level={self.level})'''
__lowerCAmelCase = max((len(str(lowercase__ ) ) for item in items) , default=4 )
__lowerCAmelCase = max(lowercase__ , 4 ) + 4
__lowerCAmelCase = self.head
__lowerCAmelCase = []
__lowerCAmelCase = node.forward.copy()
lines.append(F'''[{node.key}]'''.ljust(lowercase__ , "-" ) + "* " * len(lowercase__ ) )
lines.append(" " * label_size + "| " * len(lowercase__ ) )
while len(node.forward ) != 0:
__lowerCAmelCase = node.forward[0]
lines.append(
F'''[{node.key}]'''.ljust(lowercase__ , "-" )
+ " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
lines.append(" " * label_size + "| " * len(lowercase__ ) )
__lowerCAmelCase = node.forward
lines.append("None".ljust(lowercase__ ) + "* " * len(lowercase__ ) )
return F'''SkipList(level={self.level})\n''' + "\n".join(lowercase__ )
def __iter__( self ) -> Optional[int]:
__lowerCAmelCase = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
__lowerCAmelCase = node.forward[0]
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = []
__lowerCAmelCase = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__lowerCAmelCase = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(lowercase__ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = self._locate_node(lowercase__ )
if node is not None:
for i, update_node in enumerate(lowercase__ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__lowerCAmelCase = node.forward[i]
else:
__lowerCAmelCase = update_node.forward[:i]
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> List[Any]:
__lowerCAmelCase = self._locate_node(lowercase__ )
if node is not None:
__lowerCAmelCase = value
else:
__lowerCAmelCase = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , lowercase__ ):
update_vector.append(self.head )
__lowerCAmelCase = level
__lowerCAmelCase = Node(lowercase__ , lowercase__ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(lowercase__ )
else:
__lowerCAmelCase = new_node
def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[Any]:
__lowerCAmelCase = self._locate_node(lowercase__ )
if node is not None:
return node.value
return None
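# Usage sketch for the class above; `insert`, `find` and `delete` are the method
# names the tests below rely on. Node levels are drawn from a geometric(p)
# distribution, giving O(log n) expected search/insert/delete for p = 0.5.
def _demo_skip_list():
    sl = SkipList()
    sl.insert("a", 1)
    assert sl.find("a") == 1
    sl.delete("a")
    assert sl.find("a") is None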
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = SkipList()
skip_list.insert("Key1" , 3 )
skip_list.insert("Key2" , 12 )
skip_list.insert("Key3" , 41 )
skip_list.insert("Key4" , -19 )
__lowerCAmelCase = skip_list.head
__lowerCAmelCase = {}
while node.level != 0:
__lowerCAmelCase = node.forward[0]
__lowerCAmelCase = node.value
assert len(SCREAMING_SNAKE_CASE_ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = SkipList()
skip_list.insert("Key1" , 10 )
skip_list.insert("Key1" , 12 )
skip_list.insert("Key5" , 7 )
skip_list.insert("Key7" , 10 )
skip_list.insert("Key10" , 5 )
skip_list.insert("Key7" , 7 )
skip_list.insert("Key5" , 5 )
skip_list.insert("Key10" , 10 )
__lowerCAmelCase = skip_list.head
__lowerCAmelCase = {}
while node.level != 0:
__lowerCAmelCase = node.forward[0]
__lowerCAmelCase = node.value
if len(SCREAMING_SNAKE_CASE_ ) != 4:
print()
assert len(SCREAMING_SNAKE_CASE_ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = SkipList()
assert skip_list.find("Some key" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = SkipList()
skip_list.insert("Key2" , 20 )
assert skip_list.find("Key2" ) == 20
skip_list.insert("Some Key" , 10 )
skip_list.insert("Key2" , 8 )
skip_list.insert("V" , 13 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 10
assert skip_list.find("V" ) == 13
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 14
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 1_42 )
skip_list.insert("Key2" , 15 )
skip_list.delete("X" )
def traverse_keys(lowerCamelCase : Optional[int] ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(SCREAMING_SNAKE_CASE_ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(lowerCamelCase : int ):
return all(next_item >= item for item, next_item in zip(SCREAMING_SNAKE_CASE_ , lst[1:] ) )
__lowerCAmelCase = SkipList()
for i in range(10 ):
skip_list.insert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert is_sorted(list(SCREAMING_SNAKE_CASE_ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(SCREAMING_SNAKE_CASE_ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(SCREAMING_SNAKE_CASE_ ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_00 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = SkipList()
skip_list.insert(2 , "2" )
skip_list.insert(4 , "4" )
skip_list.insert(6 , "4" )
skip_list.insert(4 , "5" )
skip_list.insert(8 , "4" )
skip_list.insert(9 , "4" )
skip_list.delete(4 )
print(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 701
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Dict ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
__lowerCAmelCase = features.copy()
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
if issubclass(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = jsonl_path
elif issubclass(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = [jsonl_path]
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : str=("train",) ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ):
'''simple docstring'''
if split:
__lowerCAmelCase = {split: jsonl_path}
else:
__lowerCAmelCase = "train"
__lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path}
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return json.load(lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Tuple ):
'''simple docstring'''
return [json.loads(lowerCamelCase ) for line in buffer]
class UpperCAmelCase__ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase ) == 10
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
with pytest.raises(UpperCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
__lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
__lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write()
with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
assert exported_content == original_content
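# Minimal round-trip sketch of the API under test (output path hypothetical):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"]})
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#   ds2 = JsonDatasetReader("out.jsonl").read()
#   assert ds2.column_names == ["col_1"]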
| 39
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = BlipImageProcessor()
__lowerCAmelCase = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
__lowerCAmelCase = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
__lowerCAmelCase = InstructBlipProcessor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self , **UpperCamelCase ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).tokenizer
def UpperCAmelCase_ ( self , **UpperCamelCase ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).image_processor
def UpperCAmelCase_ ( self , **UpperCamelCase ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ).qformer_tokenizer
def UpperCAmelCase_ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowerCAmelCase = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
__lowerCAmelCase = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_qformer_tokenizer()
__lowerCAmelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase , image_processor=UpperCamelCase , qformer_tokenizer=UpperCamelCase )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(UpperCamelCase , return_tensors="np" )
__lowerCAmelCase = processor(images=UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_qformer_tokenizer()
__lowerCAmelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase , image_processor=UpperCamelCase , qformer_tokenizer=UpperCamelCase )
__lowerCAmelCase = 'lower newer'
__lowerCAmelCase = processor(text=UpperCamelCase )
__lowerCAmelCase = tokenizer(UpperCamelCase , return_token_type_ids=UpperCamelCase )
__lowerCAmelCase = qformer_tokenizer(UpperCamelCase , return_token_type_ids=UpperCamelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_qformer_tokenizer()
__lowerCAmelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase , image_processor=UpperCamelCase , qformer_tokenizer=UpperCamelCase )
__lowerCAmelCase = 'lower newer'
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_qformer_tokenizer()
__lowerCAmelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase , image_processor=UpperCamelCase , qformer_tokenizer=UpperCamelCase )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(UpperCamelCase )
__lowerCAmelCase = tokenizer.batch_decode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_qformer_tokenizer()
__lowerCAmelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase , image_processor=UpperCamelCase , qformer_tokenizer=UpperCamelCase )
__lowerCAmelCase = 'lower newer'
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
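# End-to-end sketch of the processor contract these tests pin down (inputs are
# hypothetical; keyword names match the test calls above):
#
#   processor = InstructBlipProcessor(
#       tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
#   )
#   inputs = processor(text="describe the image", images=image, return_tensors="pt")
#   # input_ids/attention_mask come from the main tokenizer, the qformer_* pair
#   # from the Q-Former tokenizer, and pixel_values from the image processor.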
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 39
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : str = logging.get_logger(__name__)
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[str]=False ):
'''simple docstring'''
__lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
__lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase = ''
else:
__lowerCAmelCase = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
__lowerCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
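# Worked sketch of the split above: timm stores attention as one fused qkv
# matrix of shape (3 * hidden, hidden); rows [0:h] are the query, [h:2h] the
# key, and the last h rows the value. Shapes here are illustrative only.
import numpy as np  # used only for this illustrative check

_h = 4
_qkv = np.arange(3 * _h * _h).reshape(3 * _h, _h)
_q, _k, _v = _qkv[:_h], _qkv[_h : 2 * _h], _qkv[-_h:]
assert _q.shape == _k.shape == _v.shape == (_h, _h)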
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__lowerCAmelCase = dct.pop(__lowercase )
__lowerCAmelCase = val
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = DeiTConfig()
# all deit models have fine-tuned heads
__lowerCAmelCase = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
__lowerCAmelCase = 10_00
__lowerCAmelCase = 'huggingface/label-files'
__lowerCAmelCase = 'imagenet-1k-id2label.json'
__lowerCAmelCase = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(__lowercase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = int(deit_name[-6:-4] )
__lowerCAmelCase = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
__lowerCAmelCase = 1_92
__lowerCAmelCase = 7_68
__lowerCAmelCase = 12
__lowerCAmelCase = 3
elif deit_name[9:].startswith("small" ):
__lowerCAmelCase = 3_84
__lowerCAmelCase = 15_36
__lowerCAmelCase = 12
__lowerCAmelCase = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
__lowerCAmelCase = 10_24
__lowerCAmelCase = 40_96
__lowerCAmelCase = 24
__lowerCAmelCase = 16
# load original model from timm
__lowerCAmelCase = timm.create_model(__lowercase , pretrained=__lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase = timm_model.state_dict()
__lowerCAmelCase = create_rename_keys(__lowercase , __lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_q_k_v(__lowercase , __lowercase , __lowercase )
# load HuggingFace model
__lowerCAmelCase = DeiTForImageClassificationWithTeacher(__lowercase ).eval()
model.load_state_dict(__lowercase )
# Check outputs on an image, prepared by DeiTImageProcessor
__lowerCAmelCase = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
__lowerCAmelCase = DeiTImageProcessor(size=__lowercase , crop_size=config.image_size )
__lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowerCAmelCase = encoding['pixel_values']
__lowerCAmelCase = model(__lowercase )
__lowerCAmelCase = timm_model(__lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase : Optional[int] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
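# Typical invocation, with the arguments defined by the parser above (script
# file name and output path are hypothetical):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224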
| 703
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : List[str] = (CMStochasticIterativeScheduler,)
a : str = 1_0
def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str:
__lowerCAmelCase = {
"num_train_timesteps": 201,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
config.update(**UpperCamelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = 10
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = self.scheduler_classes[0](**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps[0]
__lowerCAmelCase = scheduler.timesteps[1]
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self ) -> Any:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = 1
scheduler.set_timesteps(UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCamelCase ):
# 1. scale model input
__lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# 2. predict noise residual
__lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
# 3. predict previous sample x_t-1
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
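

# A minimal usage sketch, assuming only the scheduler API exercised by the tests
# above (set_timesteps, scale_model_input, step); the zero tensor stands in for
# a real consistency-model call.
def _example_single_step() -> None:
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    timestep = scheduler.timesteps[0]

    scaled = scheduler.scale_model_input(sample, timestep)
    model_output = torch.zeros_like(scaled)  # stand-in for a denoising model
    prev_sample = scheduler.step(model_output, timestep, sample, generator=torch.manual_seed(0)).prev_sample
    assert prev_sample.shape == sample.shape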
| 39
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
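

# Illustration (hypothetical helper, not in the original script) of the slicing
# done by read_in_k_v above: a fused kv projection of shape (2*h, h) splits
# row-wise into key weights (first h rows) and value weights (last h rows).
def _split_fused_kv(kv_weight: torch.Tensor, hidden_size: int):
    key_weight = kv_weight[:hidden_size, :]
    value_weight = kv_weight[hidden_size:, :]
    return key_weight, value_weight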
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # set expected_slice based on model name
    # ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
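

# Example invocation sketch (the script filename and paths below are
# hypothetical, inferred from the argparse arguments that follow):
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-finetuned-ade-512-512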
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 704
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return the COVID-19 status of the world as a dict, scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 39
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
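

# Usage sketch (not part of the original module): exercises the derived
# attributes computed in SwinConfig.__init__ above.
def _example_derived_attributes() -> None:
    # With the default embed_dim=96 and four stages, the derived hidden_size is
    # 96 * 2 ** 3 = 768, and stage_names lists the stem plus one entry per stage.
    config = SwinConfig()
    assert config.hidden_size == 768
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]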
| 705
|
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively compute the product using Strassen's seven multiplications."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
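

# Self-check sketch (the helper name and values are illustrative, not from the
# original module): on power-of-two inputs, actual_strassen should agree with a
# naive O(n^3) triple-loop product, e.g. both map a 4x4 matrix times the 4x4
# identity back to itself.
def naive_multiply(a: list, b: list) -> list:
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]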
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 39
| 0
|
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
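

# Sanity sketch (illustrative numbers, not part of the original metric): with
# n=5 samples and c=2 correct, pass@1 reduces to c/n, since
# 1 - (3/4) * (4/5) = 0.4.
def _example_pass_at_1() -> None:
    assert np.allclose(estimate_pass_at_k(np.array([5]), np.array([2]), 1), [0.4])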
| 706
|
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's `step` function."""

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Base class for all schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 39
| 0
|
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
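

def _pad_left_example(input_ids: List[int], pad_token: int, max_seq_length: int) -> List[int]:
    # Hypothetical helper (not in the original file) mirroring the `pad_on_left`
    # branch above: padding ids are prepended so real tokens sit at the end.
    return [pad_token] * (max_seq_length - len(input_ids)) + input_ids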
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task, data_dir, tokenizer, labels, model_type, max_seq_length=None, overwrite_cache=False, mode=Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task, data_dir, tokenizer, labels, model_type, max_seq_length=None, overwrite_cache=False, mode=Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 707
|
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
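

# Standalone sketch (hypothetical helper, not in the original module) of the
# magic-number idea above: gzip files start with the bytes 0x1F 0x8B, so reading
# the first two bytes of a file is enough to classify it.
def _sniff_is_gzip(path: str) -> bool:
    with open(path, "rb") as f:
        return f.read(2).startswith(b"\x1f\x8b")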
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1f\x8b"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xfd\x37\x7a\x58\x5a\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2f\xfd"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5a\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7a\xbc\xaf\x27\x1c"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4d\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class UpperCAmelCase__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
a : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCAmelCase_ ( cls ) -> Optional[Any]:
return max(
len(UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase , UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase )
except OSError:
return b""
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/>
__lowerCAmelCase = cls._get_magic_number_max_length()
__lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return extractor_format
    @classmethod
    def extract(
        cls,
        input_path,
        output_path,
        extractor_format=None,
        extractor="deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
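

# Usage sketch for the registry above: sniff the format from the file's magic
# number once, then dispatch to the matching extractor class. The two paths
# below are hypothetical; `infer_extractor_format` returns None for unknown
# formats, so the caller can fall back gracefully.
def _demo_extract(archive_path="/tmp/data.tar.gz", extracted_dir="/tmp/data_extracted"):  # hypothetical helper
    extractor_format = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip"
    if extractor_format is not None:
        Extractor.extract(archive_path, extracted_dir, extractor_format=extractor_format)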
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
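
    # How the lazy init works, in miniature (illustrative comments only; the
    # real `_LazyModule` also handles dir(), pickling, and error messages):
    #
    #     import importlib
    #
    #     def __getattr__(name):  # PEP 562: runs on first attribute access
    #         module_name = _name_to_module[name]          # e.g. "modeling_longformer"
    #         module = importlib.import_module("." + module_name, __name__)
    #         return getattr(module, name)
    #
    # so `from transformers import LongformerModel` only pays the heavy torch
    # import cost when the symbol is actually touched.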
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
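

# Minimal walk-through of the constraint state machine above; the token ids
# are made up for illustration. Each update() reports (stepped, completed,
# reset): the first two tokens step, the last one completes the phrase.
def _demo_phrasal_constraint():  # hypothetical demo helper, not part of the API
    constraint = PhrasalConstraint(token_ids=[5, 9, 3])
    assert constraint.update(5) == (True, False, False)
    assert constraint.update(9) == (True, False, False)
    assert constraint.update(3) == (True, True, False)
    assert constraint.remaining() == 0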
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        # If some word is a strict prefix of another, the leaf count is smaller
        # than the number of words.
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
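

# Quick illustration of the trie above (token ids are made up): the words
# [[1, 2, 3], [1, 2, 4]] share the prefix [1, 2], so after consuming [1, 2]
# the next possible tokens are [3, 4], and [1, 2, 3] reaches a leaf.
def _demo_disjunctive_trie():  # hypothetical demo helper, not part of the API
    trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
    assert trie.next_tokens([1, 2]) == [3, 4]
    assert trie.reached_leaf([1, 2, 3])
    assert trie.count_leaves(trie.trie) == 2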
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never modify self.constraints objects
        # throughout this process, so the copy starts at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
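

# Hedged end-to-end sketch: these classes back `model.generate(constraints=...)`
# (constrained beam search) in transformers. The checkpoint name and forced
# phrase below are placeholders; constrained decoding requires num_beams > 1.
def _demo_constrained_generation():  # hypothetical demo helper, not part of the API
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    phrase_ids = tokenizer("sehr", add_special_tokens=False).input_ids
    constraint = PhrasalConstraint(token_ids=phrase_ids)
    inputs = tokenizer("translate English to German: The weather is nice.", return_tensors="pt")
    out = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=32)
    print(tokenizer.decode(out[0], skip_special_tokens=True))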
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
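

# Illustrative shape check for the dummy `past_key_values` built above: each
# layer gets a (key, value) pair shaped (batch, n_head, past_len, head_dim),
# where head_dim = hidden_size // n_head (4096 // 16 = 256 for the defaults).
# The values here are toy numbers chosen for the demonstration.
def _demo_past_key_values_shape():  # hypothetical demo helper
    import torch

    batch, n_head, past_len, head_dim = 2, 16, 10, 4096 // 16
    past_key = torch.zeros(batch, n_head, past_len, head_dim)
    past_value = torch.zeros(batch, n_head, past_len, head_dim)
    assert past_key.shape == past_value.shape == torch.Size([2, 16, 10, 256])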
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads the inputs for multiple-choice tasks.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
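

# Hedged usage sketch for the collator above; the checkpoint name and the toy
# features are placeholders. Two examples with four choices each collate to
# tensors of shape (batch_size, num_choices, seq_len) after padding.
def _demo_collator():  # hypothetical demo helper
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    features = [
        {"input_ids": [[101, 7592, 102]] * 4, "attention_mask": [[1, 1, 1]] * 4, "label": 2},
        {"input_ids": [[101, 2088, 102, 103]] * 4, "attention_mask": [[1, 1, 1, 1]] * 4, "label": 0},
    ]
    batch = DataCollatorForMultipleChoice(tokenizer=tokenizer)(features)
    print(batch["input_ids"].shape)  # torch.Size([2, 4, 4])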
def main():
    # Parse arguments either from a json file or from the command line.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
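
    # Worked example of the un-flatten above (toy data): with 4 endings per
    # question, tokenizing 2 questions yields 8 flat sequences
    #   [q0e0, q0e1, q0e2, q0e3, q1e0, q1e1, q1e2, q1e3]
    # and `v[i : i + 4] for i in range(0, len(v), 4)` regroups them back into
    #   [[q0e0..q0e3], [q1e0..q1e3]]
    # so each dataset row again carries its 4 candidate endings.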
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when a formatter's backend is not installed."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory to instantiate a Formatter from its type name (or alias) and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        )
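

# Usage sketch: aliases resolve to the canonical type name before lookup, so
# "np" and "numpy" yield the same formatter; unknown names raise ValueError.
def _demo_get_formatter():  # hypothetical demo helper
    formatter = get_formatter("np")
    print(type(formatter).__name__)  # NumpyFormatter
    try:
        get_formatter("not-a-format")
    except ValueError as err:
        print(err)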
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    do_finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))
    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)
    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
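

# Standalone sketch of the confidence filter above, on toy in-memory data
# whose column names mirror what the function expects.
def _demo_confidence_filter():  # hypothetical demo helper
    toy = datasets.Dataset.from_dict(
        {"prediction": [1, 0, 1, 0], "probability": [0.95, 0.40, 0.80, 0.99], "label": [-1, -1, -1, -1]}
    )
    kept = toy.filter(lambda example: example["probability"] > 0.9)
    kept = kept.remove_columns(["label", "probability"]).rename_column("prediction", "label")
    print(kept["label"])  # [1, 0]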
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Fine-tune, pseudo-label the inference data, and repeat."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file
    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."
    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    data_dir = data_dir_format(0)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
if os.path.exists(lowerCamelCase_ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , lowerCamelCase_ , lowerCamelCase_ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , lowerCamelCase_ )
finetune(**lowerCamelCase_ )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCamelCase_ )
logger.info("Self-training job completed: iteration: %d, stage: 1." , lowerCamelCase_ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__lowerCAmelCase = os.path.join(lowerCamelCase_ , "best-checkpoint" )
__lowerCAmelCase = os.path.join(lowerCamelCase_ , "stage-2" )
# Update arguments_dict
__lowerCAmelCase = model_path
__lowerCAmelCase = data_files["""train"""]
__lowerCAmelCase = current_output_dir
__lowerCAmelCase = os.path.join(lowerCamelCase_ , "best-checkpoint" , lowerCamelCase_ )
if os.path.exists(lowerCamelCase_ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , lowerCamelCase_ , lowerCamelCase_ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , lowerCamelCase_ )
finetune(**lowerCamelCase_ )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCamelCase_ )
logger.info("Self-training job completed: iteration: %d, stage: 2." , lowerCamelCase_ )
__lowerCAmelCase = iteration
__lowerCAmelCase = data_dir_format(iteration + 1 )
__lowerCAmelCase = AutoConfig.from_pretrained(os.path.join(lowerCamelCase_ , "best-checkpoint" ) )
__lowerCAmelCase = config.idalabel
__lowerCAmelCase = os.path.join(lowerCamelCase_ , "eval_results_best-checkpoint.json" )
__lowerCAmelCase = os.path.join(lowerCamelCase_ , "test_results_best-checkpoint.json" )
assert os.path.exists(lowerCamelCase_ )
with open(lowerCamelCase_ , "r" ) as f:
__lowerCAmelCase = float(json.load(lowerCamelCase_ )[args.eval_metric] )
__lowerCAmelCase = os.path.join(lowerCamelCase_ , "infer_output_best-checkpoint.csv" )
assert os.path.exists(lowerCamelCase_ )
# Loading the dataset from local csv or json files.
__lowerCAmelCase = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["""data"""]
__lowerCAmelCase = load_dataset("csv" , data_files={"data": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
shutil.copy(lowerCamelCase_ , os.path.join(lowerCamelCase_ , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(lowerCamelCase_ ):
shutil.copy(lowerCamelCase_ , os.path.join(lowerCamelCase_ , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
accelerator.wait_for_everyone()
__lowerCAmelCase = os.path.join(lowerCamelCase_ , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__lowerCAmelCase = eval_result
if best_iteration is None:
__lowerCAmelCase = new_iteration
__lowerCAmelCase = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__lowerCAmelCase = new_iteration
__lowerCAmelCase = new_eval_result
__lowerCAmelCase = 0
else:
if new_eval_result == best_eval_result:
__lowerCAmelCase = new_iteration
__lowerCAmelCase = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__lowerCAmelCase = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , lowerCamelCase_ )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCamelCase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCamelCase_ , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowerCamelCase_ , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCamelCase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCamelCase_ , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowerCamelCase_ , "eval_results_best-iteration.json" ) , )
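# Hedged usage note (added for illustration): in the upstream self-training
# example this driver is exposed as `selftrain(...)` (original name assumed
# here, since the definition above was renamed) and would be invoked along
# these lines:
#
#     selftrain(
#         "bert-base-uncased",   # model_name_or_path
#         "train.csv",           # train_file (labeled data)
#         "unlabeled.csv",       # infer_file (data to pseudo-label)
#         "output",              # output_dir
#         max_selftrain_iterations=3,
#     )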
| 712
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCAmelCase ( lowerCamelCase : Any ):
'''simple docstring'''
__lowerCAmelCase = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
__lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
__lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
__lowerCAmelCase = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
__lowerCAmelCase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowerCAmelCase = [4, 4, 4, 4]
__lowerCAmelCase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowerCAmelCase = [3, 3, 3, 3]
else:
__lowerCAmelCase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowerCAmelCase = 96
elif "small" in model_name:
__lowerCAmelCase = 96
elif "base" in model_name:
__lowerCAmelCase = 1_28
elif "large" in model_name:
__lowerCAmelCase = 1_92
elif "xlarge" in model_name:
__lowerCAmelCase = 2_56
elif "huge" in model_name:
__lowerCAmelCase = 3_52
# set label information
__lowerCAmelCase = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
__lowerCAmelCase = "imagenet-22k-id2label.json"
else:
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = FocalNetConfig(
embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , )
return config
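# Hedged sketch (added for illustration, not in the original converter): the
# name-based heuristics above depend only on the model-name string, so the
# depth rule can be restated and spot-checked without downloading a
# checkpoint. The demo function is never invoked automatically.
def _demo_depths_rule():
    for name, expected in [
        ("focalnet-tiny", [2, 2, 6, 2]),
        ("focalnet-base", [2, 2, 18, 2]),
    ]:
        depths = [2, 2, 6, 2] if "tiny" in name else [2, 2, 18, 2]
        assert depths == expected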
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
__lowerCAmelCase = "encoder." + name
if "encoder.layers" in name:
__lowerCAmelCase = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
__lowerCAmelCase = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
__lowerCAmelCase = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCAmelCase = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCAmelCase = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCAmelCase = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
__lowerCAmelCase = "layernorm.weight"
if name == "norm.bias":
__lowerCAmelCase = "layernorm.bias"
if "head" in name:
__lowerCAmelCase = name.replace("head" , "classifier" )
else:
__lowerCAmelCase = "focalnet." + name
return name
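# Hedged reference (added for illustration): the renaming above is a pure
# string transform, so two representative mappings can be spelled out by
# tracing the branches (not by running the renamed function):
#
#   "patch_embed.proj.weight"
#       -> "focalnet.embeddings.patch_embeddings.projection.weight"
#   "layers.0.blocks.0.modulation.f.weight"
#       -> "focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight"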
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]=False ):
'''simple docstring'''
    # fmt: off
    __lowerCAmelCase = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
__lowerCAmelCase = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase )
__lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
__lowerCAmelCase = state_dict.pop(lowerCamelCase )
__lowerCAmelCase = val
__lowerCAmelCase = get_focalnet_config(lowerCamelCase )
__lowerCAmelCase = FocalNetForImageClassification(lowerCamelCase )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase )
# verify conversion
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = BitImageProcessor(
do_resize=lowerCamelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=2_24 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , )
__lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
__lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="pt" )
__lowerCAmelCase = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
__lowerCAmelCase = image_transforms(lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1e-4 )
__lowerCAmelCase = model(**lowerCamelCase )
__lowerCAmelCase = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
__lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
__lowerCAmelCase = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
__lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
__lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
__lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print(f'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(f'''{model_name}''' )
processor.push_to_hub(f'''{model_name}''' )
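# Hedged usage note: the converter above is normally driven by the argparse
# block below; assuming the upstream script name, a typical invocation is:
#
#     python convert_focalnet_to_hf_format.py \
#         --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny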
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase : Optional[int] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 39
| 0
|
'''simple docstring'''
from pathlib import Path
import fire
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int ):
'''simple docstring'''
__lowerCAmelCase = Path(A__ )
__lowerCAmelCase = Path(A__ )
dest_dir.mkdir(exist_ok=A__ )
for path in src_dir.iterdir():
__lowerCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
__lowerCAmelCase = dest_dir.joinpath(path.name )
print(A__ )
dest_path.open("w" ).write("\n".join(A__ ) )
if __name__ == "__main__":
fire.Fire(minify)
| 713
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase : str = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase : Optional[Any] = {
'''squeezebert/squeezebert-uncased''': 5_1_2,
'''squeezebert/squeezebert-mnli''': 5_1_2,
'''squeezebert/squeezebert-mnli-headless''': 5_1_2,
}
lowerCAmelCase : Tuple = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Dict = VOCAB_FILES_NAMES
a : Any = PRETRAINED_VOCAB_FILES_MAP
a : Dict = PRETRAINED_INIT_CONFIGURATION
a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[Any] = SqueezeBertTokenizer
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> List[Any]:
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCamelCase ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(UpperCamelCase , normalizer_state.pop("type" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**UpperCamelCase )
__lowerCAmelCase = do_lower_case
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None ) -> str:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
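# Hedged reference (added for illustration): for a BERT-style fast tokenizer
# the two methods above produce the classic layouts for a sequence pair A/B:
#
#   input_ids:      [CLS] A ... [SEP] B ... [SEP]
#   token_type_ids:   0   0 ...   0   1 ...   1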
| 39
| 0
|
'''simple docstring'''
lowerCAmelCase : List[Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase : Any = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 714
|
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(lowerCamelCase ) / len(lowerCamelCase )
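# Hedged note (added for illustration): with the function under its original
# name `mean`, doctest examples of the kind picked up by `doctest.testmod()`
# below would read:
#
#     >>> mean([3, 6, 9, 12, 15, 18, 21])
#     12.0
#     >>> mean([])
#     Traceback (most recent call last):
#         ...
#     ValueError: List is empty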
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
| 0
|
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def __lowerCAmelCase ( lowerCamelCase : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : Dict , ):
'''simple docstring'''
__lowerCAmelCase = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
__lowerCAmelCase , __lowerCAmelCase = input_paths_and_base_extractors[compression_format]
if input_path is None:
__lowerCAmelCase = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(a__ )
assert base_extractor.is_extractable(a__ )
__lowerCAmelCase = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(a__ , a__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__lowerCAmelCase = file_path.read_text(encoding="utf-8" )
else:
__lowerCAmelCase = output_path.read_text(encoding="utf-8" )
__lowerCAmelCase = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowerCAmelCase = {
"7z": seven_zip_file,
"bz2": bza_file,
"gzip": gz_file,
"lz4": lza_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
__lowerCAmelCase = input_paths[compression_format]
if input_path is None:
__lowerCAmelCase = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(a__ )
__lowerCAmelCase = Extractor.infer_extractor_format(a__ )
assert extractor_format is not None
__lowerCAmelCase = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(a__ , a__ , a__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__lowerCAmelCase = file_path.read_text(encoding="utf-8" )
else:
__lowerCAmelCase = output_path.read_text(encoding="utf-8" )
__lowerCAmelCase = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
import tarfile
__lowerCAmelCase = tmp_path / "data_dot_dot"
directory.mkdir()
__lowerCAmelCase = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(a__ , "w" ) as f:
f.add(a__ , arcname=os.path.join(".." , text_file.name ) )
return path
@pytest.fixture
def __lowerCAmelCase ( lowerCamelCase : Optional[int] ):
'''simple docstring'''
import tarfile
__lowerCAmelCase = tmp_path / "data_sym_link"
directory.mkdir()
__lowerCAmelCase = directory / "tar_file_with_sym_link.tar"
os.symlink(".." , directory / "subdir" , target_is_directory=a__ )
with tarfile.TarFile(a__ , "w" ) as f:
f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
__lowerCAmelCase = insecure_tar_files[insecure_tar_file]
__lowerCAmelCase = tmp_path / "extracted"
TarExtractor.extract(a__ , a__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCAmelCase ( lowerCamelCase : Any ):
'''simple docstring'''
__lowerCAmelCase = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
__lowerCAmelCase = (
B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb" ) as f:
f.write(a__ )
assert zipfile.is_zipfile(str(a__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(a__ ) # but we're right
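# Hedged note: the payload above embeds the end-of-central-directory magic
# "PK\x05\x06" inside PNG image data, which is enough to fool
# `zipfile.is_zipfile` (see the linked CPython PR) while the stricter
# extractor check above still refuses it.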
| 715
|
'''simple docstring'''
import re
def __lowerCAmelCase ( lowerCamelCase : str ):
'''simple docstring'''
__lowerCAmelCase = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
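# Hedged reference (added for illustration): numbers the pattern above
# accepts and rejects.
# Accepted:  0713123456, 94781234567, +94702343221, 0094702343221
# Rejected:  0912343221 (digit after the leading 0 must be 7),
#            071234567 (subscriber part shorter than 7 digits)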
| 39
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase=3 , UpperCamelCase=32 , UpperCamelCase=3 , UpperCamelCase=10 , UpperCamelCase=[10, 20, 30, 40] , UpperCamelCase=[1, 1, 2, 1] , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=3 , UpperCamelCase=None , ) -> Optional[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embeddings_size
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = scope
__lowerCAmelCase = len(lowerCAmelCase_ )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = RegNetModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = RegNetForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a : str = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
a : Dict = (
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
a : str = False
a : Optional[Any] = False
a : Any = False
a : Tuple = False
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = RegNetModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ) -> Dict:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(config=lowerCAmelCase_ )
for name, module in model.named_modules():
if isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def UpperCAmelCase_ ( self ) -> List[Any]:
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowerCAmelCase = layer_type
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def UpperCAmelCase_ ( self ) -> Any:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = RegNetModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self ) -> Dict:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase_ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors="pt" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 716
|
'''simple docstring'''
import os
import sys
import unittest
lowerCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = {"BertModelTest": "BertModelTester"}
__lowerCAmelCase = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
__lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
__lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
__lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
__lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
| 39
| 0
|
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = name
__lowerCAmelCase = val
def __str__( self ) -> List[Any]:
return F'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self , UpperCamelCase ) -> Dict:
return self.val < other.val
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase ) -> Dict:
__lowerCAmelCase = {}
__lowerCAmelCase = {}
__lowerCAmelCase = self.build_heap(UpperCAmelCase__ )
def __getitem__( self , UpperCamelCase ) -> int:
return self.get_value(UpperCAmelCase__ )
def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[Any]:
return (idx - 1) // 2
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Dict:
return idx * 2 + 1
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Union[str, Any]:
return idx * 2 + 2
def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[Any]:
return self.heap_dict[key]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Union[str, Any]:
__lowerCAmelCase = len(UpperCAmelCase__ ) - 1
__lowerCAmelCase = self.get_parent_idx(UpperCAmelCase__ )
for idx, i in enumerate(UpperCAmelCase__ ):
__lowerCAmelCase = idx
__lowerCAmelCase = i.val
for i in range(UpperCAmelCase__ , -1 , -1 ):
self.sift_down(UpperCAmelCase__ , UpperCAmelCase__ )
return array
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
while True:
__lowerCAmelCase = self.get_left_child_idx(UpperCAmelCase__ ) # noqa: E741
__lowerCAmelCase = self.get_right_child_idx(UpperCAmelCase__ )
__lowerCAmelCase = idx
if l < len(UpperCAmelCase__ ) and array[l] < array[idx]:
__lowerCAmelCase = l
if r < len(UpperCAmelCase__ ) and array[r] < array[smallest]:
__lowerCAmelCase = r
if smallest != idx:
__lowerCAmelCase = array[smallest], array[idx]
                __lowerCAmelCase = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
__lowerCAmelCase = smallest
else:
break
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = self.get_parent_idx(UpperCAmelCase__ )
while p >= 0 and self.heap[p] > self.heap[idx]:
__lowerCAmelCase = self.heap[idx], self.heap[p]
__lowerCAmelCase = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
__lowerCAmelCase = p
__lowerCAmelCase = self.get_parent_idx(UpperCAmelCase__ )
def UpperCAmelCase_ ( self ) -> Tuple:
return self.heap[0]
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.heap[-1], self.heap[0]
__lowerCAmelCase = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
__lowerCAmelCase = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
self.heap.append(UpperCAmelCase__ )
__lowerCAmelCase = len(self.heap ) - 1
__lowerCAmelCase = node.val
self.sift_up(len(self.heap ) - 1 )
def UpperCAmelCase_ ( self ) -> Dict:
return len(self.heap ) == 0
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> List[str]:
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
__lowerCAmelCase = new_value
__lowerCAmelCase = new_value
self.sift_up(self.idx_of_element[node] )
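# Hedged note (added for illustration): `decrease_key` relies on Node.__lt__,
# so after lowering B's value to -17 in the demo below, sifting B up restores
# the heap property and the new top of the heap is Node(B, -17).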
lowerCAmelCase : List[str] = Node('''R''', -1)
lowerCAmelCase : Union[str, Any] = Node('''B''', 6)
lowerCAmelCase : Union[str, Any] = Node('''A''', 3)
lowerCAmelCase : Optional[int] = Node('''X''', 1)
lowerCAmelCase : List[Any] = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
lowerCAmelCase : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : torch.FloatTensor
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
def __init__( self , UpperCamelCase = 16 , UpperCamelCase = 88 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = 32 , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = "geglu" , UpperCamelCase = True , UpperCamelCase = True , ) -> List[str]:
super().__init__()
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = attention_head_dim
__lowerCAmelCase = num_attention_heads * attention_head_dim
__lowerCAmelCase = in_channels
__lowerCAmelCase = torch.nn.GroupNorm(num_groups=UpperCamelCase , num_channels=UpperCamelCase , eps=1E-6 , affine=UpperCamelCase )
__lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase )
# 3. Define transformers blocks
__lowerCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , cross_attention_dim=UpperCamelCase , activation_fn=UpperCamelCase , attention_bias=UpperCamelCase , double_self_attention=UpperCamelCase , norm_elementwise_affine=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
__lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=1 , UpperCamelCase=None , UpperCamelCase = True , ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = hidden_states.shape
__lowerCAmelCase = batch_frames // num_frames
__lowerCAmelCase = hidden_states
__lowerCAmelCase = hidden_states[None, :].reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__lowerCAmelCase = self.norm(UpperCamelCase )
__lowerCAmelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = self.proj_in(UpperCamelCase )
# 2. Blocks
for block in self.transformer_blocks:
__lowerCAmelCase = block(
UpperCamelCase , encoder_hidden_states=UpperCamelCase , timestep=UpperCamelCase , cross_attention_kwargs=UpperCamelCase , class_labels=UpperCamelCase , )
# 3. Output
__lowerCAmelCase = self.proj_out(UpperCamelCase )
__lowerCAmelCase = (
hidden_states[None, None, :]
.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__lowerCAmelCase = hidden_states.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCamelCase )
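# Hedged shape note (added for illustration): with batch_frames equal to
# batch_size * num_frames, the reshapes in the forward pass above route
# (B*F, C, H, W) -> (B*H*W, F, C), so self-attention in the transformer
# blocks mixes information across the F frames at each spatial location,
# before mapping back to (B*F, C, H, W) for the residual addition.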
| 39
| 0
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase : str = HfApi()
lowerCAmelCase : int = {}
# fmt: off
lowerCAmelCase : Union[str, Any] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowerCAmelCase : Tuple = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowerCAmelCase : List[Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowerCAmelCase : List[Any] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowerCAmelCase : Any = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowerCAmelCase : Any = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowerCAmelCase : Any = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowerCAmelCase : Union[str, Any] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowerCAmelCase : Dict = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowerCAmelCase : str = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowerCAmelCase : int = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowerCAmelCase : int = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowerCAmelCase : Dict = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowerCAmelCase : Optional[int] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowerCAmelCase : Dict = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
lowerCAmelCase : Optional[int] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCAmelCase : Union[str, Any] = """/home/patrick/google_checkpoints/""" + mod.modelId.split('''/''')[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
lowerCAmelCase : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCAmelCase : Optional[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCAmelCase : str = torch.tensor([1_0] * noise.shape[0])
with torch.no_grad():
lowerCAmelCase : Tuple = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :3_0], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 718
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCAmelCase ( lowerCamelCase : bytes , lowerCamelCase : int ):
'''simple docstring'''
__lowerCAmelCase = f'''{sampling_rate}'''
__lowerCAmelCase = "1"
__lowerCAmelCase = "f32le"
__lowerCAmelCase = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__lowerCAmelCase = ffmpeg_process.communicate(lowerCamelCase )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
__lowerCAmelCase = output_stream[0]
__lowerCAmelCase = np.frombuffer(lowerCamelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
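# Hedged usage sketch (added for illustration, original name `ffmpeg_read`
# assumed for the function above): decoding a local audio file; ffmpeg must
# be available on PATH.
#
#     with open("sample.flac", "rb") as f:
#         audio = ffmpeg_read(f.read(), sampling_rate=16_000)
#     # `audio` is a mono float32 numpy array resampled to 16 kHz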
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ):
'''simple docstring'''
__lowerCAmelCase = f'''{sampling_rate}'''
__lowerCAmelCase = "1"
if format_for_conversion == "s16le":
__lowerCAmelCase = 2
elif format_for_conversion == "f32le":
__lowerCAmelCase = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
__lowerCAmelCase = platform.system()
if system == "Linux":
__lowerCAmelCase = "alsa"
__lowerCAmelCase = "default"
elif system == "Darwin":
__lowerCAmelCase = "avfoundation"
__lowerCAmelCase = ":0"
elif system == "Windows":
__lowerCAmelCase = "dshow"
__lowerCAmelCase = "default"
__lowerCAmelCase = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
__lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase )
for item in iterator:
yield item
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[Tuple[float, float], float]] = None , lowerCamelCase : str = "f32le" , ):
'''simple docstring'''
if stream_chunk_s is not None:
__lowerCAmelCase = stream_chunk_s
else:
__lowerCAmelCase = chunk_length_s
__lowerCAmelCase = ffmpeg_microphone(lowerCamelCase , lowerCamelCase , format_for_conversion=lowerCamelCase )
if format_for_conversion == "s16le":
__lowerCAmelCase = np.intaa
__lowerCAmelCase = 2
elif format_for_conversion == "f32le":
__lowerCAmelCase = np.floataa
__lowerCAmelCase = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
__lowerCAmelCase = chunk_length_s / 6
__lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCamelCase , (int, float) ):
__lowerCAmelCase = [stride_length_s, stride_length_s]
__lowerCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__lowerCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__lowerCAmelCase = datetime.datetime.now()
__lowerCAmelCase = datetime.timedelta(seconds=lowerCamelCase )
for item in chunk_bytes_iter(lowerCamelCase , lowerCamelCase , stride=(stride_left, stride_right) , stream=lowerCamelCase ):
# Put everything back in numpy scale
__lowerCAmelCase = np.frombuffer(item["raw"] , dtype=lowerCamelCase )
__lowerCAmelCase = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
__lowerCAmelCase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Tuple[int, int] , lowerCamelCase : bool = False ):
'''simple docstring'''
__lowerCAmelCase = B""
__lowerCAmelCase , __lowerCAmelCase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
__lowerCAmelCase = 0
for raw in iterator:
acc += raw
if stream and len(lowerCamelCase ) < chunk_len:
__lowerCAmelCase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
__lowerCAmelCase = (_stride_left, stride_right)
__lowerCAmelCase = {"raw": acc[:chunk_len], "stride": stride}
if stream:
__lowerCAmelCase = False
yield item
__lowerCAmelCase = stride_left
__lowerCAmelCase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCamelCase ) > stride_left:
__lowerCAmelCase = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
__lowerCAmelCase = False
yield item
def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int ):
'''simple docstring'''
    __lowerCAmelCase = 2**24 # 16 MB
try:
with subprocess.Popen(lowerCamelCase , stdout=subprocess.PIPE , bufsize=lowerCamelCase ) as ffmpeg_process:
while True:
__lowerCAmelCase = ffmpeg_process.stdout.read(lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 39
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase__ ( __lowerCamelCase ):
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase="None" , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , ) -> List[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = relative_attention
__lowerCAmelCase = position_biased_input
__lowerCAmelCase = pos_att_type
__lowerCAmelCase = scope
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
__lowerCAmelCase = DebertaVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )[0]
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )[0]
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = DebertaVaForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DebertaVaForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DebertaVaForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
__lowerCAmelCase = DebertaVaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCAmelCase = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
__lowerCAmelCase = DebertaVaForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
a : int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : List[str] = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : List[Any] = True
a : Optional[Any] = False
a : int = False
a : List[str] = False
a : Union[str, Any] = False
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = DebertaVaModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase_ ( self ) -> int:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DebertaVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def UpperCAmelCase_ ( self ) -> Any:
pass
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
__lowerCAmelCase = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__lowerCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
__lowerCAmelCase = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
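# --- Editor's addition: hedged usage sketch ---
# `DebertaVaModel` here corresponds to `DebertaV2Model` in upstream
# `transformers`. Assuming that API, the checkpoint from the integration test
# above loads as:
#
#   import torch
#   from transformers import AutoTokenizer, DebertaV2Model
#   model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#   with torch.no_grad():
#       outputs = model(**tokenizer("hello world", return_tensors="pt"))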
| 719
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __lowerCAmelCase ( lowerCamelCase : List[str] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCAmelCase__ ( UpperCamelCase__ ):
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase ) -> Tuple:
__lowerCAmelCase = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=UpperCamelCase , default=UpperCamelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=UpperCamelCase , help="Name of the model to download" )
download_parser.set_defaults(func=UpperCamelCase )
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
__lowerCAmelCase = model
__lowerCAmelCase = cache
__lowerCAmelCase = force
__lowerCAmelCase = trust_remote_code
def UpperCAmelCase_ ( self ) -> Any:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
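# --- Editor's addition: hedged usage note ---
# Assuming this command is wired into the `transformers-cli` entry point as in
# upstream `transformers`, the parser above corresponds to an invocation like:
#
#   transformers-cli download --cache-dir /tmp/models --force bert-base-uncased
#
# Only the flags come from the `add_argument` calls above; the cache path and
# checkpoint name are illustrative placeholders.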
| 39
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase__ ( _snake_case ):
a : int = """mobilenet_v1"""
def __init__( self , UpperCamelCase=3 , UpperCamelCase=224 , UpperCamelCase=1.0 , UpperCamelCase=8 , UpperCamelCase="relu6" , UpperCamelCase=True , UpperCamelCase=0.9_99 , UpperCamelCase=0.02 , UpperCamelCase=0.0_01 , **UpperCamelCase , ) -> Any:
super().__init__(**snake_case_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__lowerCAmelCase = num_channels
__lowerCAmelCase = image_size
__lowerCAmelCase = depth_multiplier
__lowerCAmelCase = min_depth
__lowerCAmelCase = hidden_act
__lowerCAmelCase = tf_padding
__lowerCAmelCase = classifier_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
class UpperCAmelCase__ ( _snake_case ):
a : Optional[Any] = version.parse("""1.11""" )
@property
def UpperCAmelCase_ ( self ) -> Any:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def UpperCAmelCase_ ( self ) -> Dict:
return 1E-4
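# --- Editor's addition: hedged usage sketch ---
# The first class above mirrors `MobileNetV1Config` from upstream
# `transformers`. Assuming that API, a width-scaled variant is configured as:
#
#   from transformers import MobileNetV1Config
#   config = MobileNetV1Config(image_size=192, depth_multiplier=0.75)
#
# Note that the constructor raises a ValueError for depth_multiplier <= 0, as
# enforced in __init__ above.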
| 720
|
'''simple docstring'''
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = 1
__lowerCAmelCase = 2
while i * i <= n:
__lowerCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = 1
__lowerCAmelCase = 1
while True:
i += 1
t_num += i
if count_divisors(lowerCamelCase ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution())
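# --- Editor's addition: worked example ---
# The divisor count relies on the identity d(n) = prod(e_i + 1) for
# n = prod(p_i ** e_i). For n = 28 = 2**2 * 7**1 it gives (2 + 1) * (1 + 1) = 6,
# matching the divisors 1, 2, 4, 7, 14, 28. A brute-force cross-check:
def _brute_force_divisor_count(n):
    return sum(1 for d in range(1, n + 1) if n % d == 0)

assert _brute_force_divisor_count(28) == 6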
| 39
| 0
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase : int = logging.get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = question_encoder
__lowerCAmelCase = generator
__lowerCAmelCase = self.question_encoder
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Union[str, Any]:
if os.path.isfile(UpperCAmelCase__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
__lowerCAmelCase = os.path.join(UpperCAmelCase__ , "question_encoder_tokenizer" )
__lowerCAmelCase = os.path.join(UpperCAmelCase__ , "generator_tokenizer" )
self.question_encoder.save_pretrained(UpperCAmelCase__ )
self.generator.save_pretrained(UpperCAmelCase__ )
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> Dict:
from ..auto.tokenization_auto import AutoTokenizer
__lowerCAmelCase = kwargs.pop("config" , UpperCAmelCase__ )
if config is None:
__lowerCAmelCase = RagConfig.from_pretrained(UpperCAmelCase__ )
__lowerCAmelCase = AutoTokenizer.from_pretrained(
UpperCAmelCase__ , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
__lowerCAmelCase = AutoTokenizer.from_pretrained(
UpperCAmelCase__ , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=UpperCAmelCase__ , generator=UpperCAmelCase__ )
def __call__( self , *UpperCamelCase , **UpperCamelCase ) -> int:
return self.current_tokenizer(*UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self , *UpperCamelCase , **UpperCamelCase ) -> List[Any]:
return self.generator.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self , *UpperCamelCase , **UpperCamelCase ) -> Dict:
return self.generator.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.question_encoder
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.generator
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "longest" , UpperCamelCase = None , UpperCamelCase = True , **UpperCamelCase , ) -> Any:
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , UpperCAmelCase__ , )
if max_length is None:
__lowerCAmelCase = self.current_tokenizer.model_max_length
__lowerCAmelCase = self(
UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__lowerCAmelCase = self.current_tokenizer.model_max_length
__lowerCAmelCase = self(
text_target=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ , )
__lowerCAmelCase = labels['''input_ids''']
return model_inputs
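# --- Editor's addition: hedged usage sketch ---
# This wrapper mirrors `RagTokenizer` from upstream `transformers`: a single
# object holding a question-encoder tokenizer and a generator tokenizer, with
# `current_tokenizer` switched between them. Assuming that API:
#
#   from transformers import RagTokenizer
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who sang beauty and the beast", return_tensors="pt")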
| 721
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Optional[Any] = """dpr"""
def __init__( self , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1E-12 , UpperCamelCase=0 , UpperCamelCase="absolute" , UpperCamelCase = 0 , **UpperCamelCase , ) -> Tuple:
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = projection_dim
__lowerCAmelCase = position_embedding_type
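# --- Editor's addition: hedged usage sketch ---
# The class above mirrors `DPRConfig` from upstream `transformers`. Assuming
# that API, a small encoder with a projection head on top of the pooled
# output is configured as:
#
#   from transformers import DPRConfig, DPRContextEncoder
#   config = DPRConfig(hidden_size=256, num_hidden_layers=4, projection_dim=128)
#   encoder = DPRContextEncoder(config)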
| 39
| 0
|
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase : List[str] = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = graph
# mapping node to its parent in resulting breadth first tree
__lowerCAmelCase = {}
__lowerCAmelCase = source_vertex
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = {self.source_vertex}
__lowerCAmelCase = None
__lowerCAmelCase = [self.source_vertex] # first in first out queue
while queue:
__lowerCAmelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_lowerCamelCase )
__lowerCAmelCase = vertex
queue.append(_lowerCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Union[str, Any]:
if target_vertex == self.source_vertex:
return self.source_vertex
__lowerCAmelCase = self.parent.get(_lowerCamelCase )
if target_vertex_parent is None:
__lowerCAmelCase = (
F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(_lowerCamelCase )
return self.shortest_path(_lowerCamelCase ) + F'''->{target_vertex}'''
if __name__ == "__main__":
lowerCAmelCase : List[Any] = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 700
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
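# --- Editor's addition: hedged sketch of the lazy-import pattern ---
# `_LazyModule` defers the heavy backend imports above (torch/tf/flax) until a
# symbol is first accessed. A minimal stdlib-only illustration of the idea,
# with purely illustrative names:
import importlib


class _TinyLazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        # flatten {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # import the owning submodule only on first attribute access
        module = importlib.import_module(f".{self._symbol_to_module[symbol]}", self._package)
        return getattr(module, symbol)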
| 39
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : int = logging.get_logger(__name__)
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = os.path.abspath(lowerCamelCase_ )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
__lowerCAmelCase = tf.train.list_variables(lowerCamelCase_ )
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__lowerCAmelCase = full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
__lowerCAmelCase = name[1:]
# figure out how many levels deep the name is
__lowerCAmelCase = 0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(lowerCamelCase_ )
# read data
__lowerCAmelCase = tf.train.load_variable(lowerCamelCase_ , lowerCamelCase_ )
names.append("/".join(lowerCamelCase_ ) )
arrays.append(lowerCamelCase_ )
logger.info(f'''Read a total of {len(lowerCamelCase_ ):,} layers''' )
# Sanity check
if len(set(lowerCamelCase_ ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(lowerCamelCase_ ) )})''' )
__lowerCAmelCase = list(set(lowerCamelCase_ ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
for full_name, array in zip(lowerCamelCase_ , lowerCamelCase_ ):
__lowerCAmelCase = full_name.split("/" )
__lowerCAmelCase = model
__lowerCAmelCase = []
for i, m_name in enumerate(lowerCamelCase_ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
__lowerCAmelCase = int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
__lowerCAmelCase = getattr(lowerCamelCase_ , "embeddings" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
__lowerCAmelCase = getattr(lowerCamelCase_ , "encoder" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "layer" )
__lowerCAmelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
__lowerCAmelCase = getattr(lowerCamelCase_ , "pooler" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "token_type_embeddings" )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append("weight" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
__lowerCAmelCase = getattr(lowerCamelCase_ , "attention" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
__lowerCAmelCase = getattr(lowerCamelCase_ , "attention" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "output" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
__lowerCAmelCase = getattr(lowerCamelCase_ , "attention" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "output" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
__lowerCAmelCase = getattr(lowerCamelCase_ , "output" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "dense" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["output", "LayerNorm"] )
__lowerCAmelCase = getattr(lowerCamelCase_ , "output" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
__lowerCAmelCase = getattr(lowerCamelCase_ , "intermediate" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "dense" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("output" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
__lowerCAmelCase = getattr(lowerCamelCase_ , "weight" )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
            __lowerCAmelCase = ".".join(lowerCamelCase_ )
if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , lowerCamelCase_ ) or re.match(
r"(\S+)\.attention\.output\.dense\.weight" , lowerCamelCase_ ):
__lowerCAmelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__lowerCAmelCase = array.transpose()
if pointer.shape == array.shape:
__lowerCAmelCase = torch.from_numpy(lowerCamelCase_ )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : List[str] ):
'''simple docstring'''
logger.info(f'''Loading model based on config from {config_path}...''' )
__lowerCAmelCase = BertConfig.from_json_file(lowerCamelCase_ )
__lowerCAmelCase = BertModel(lowerCamelCase_ )
# Load weights from checkpoint
logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , lowerCamelCase_ )
if __name__ == "__main__":
lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
lowerCAmelCase : Tuple = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
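# --- Editor's addition: hedged usage note ---
# With the three required flags defined above, the script is invoked as, e.g.:
#
#   python convert_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin
#
# The script filename and paths are illustrative; only the flag names come
# from the parser above.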
| 701
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Dict ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
__lowerCAmelCase = features.copy()
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
if issubclass(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = jsonl_path
elif issubclass(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = [jsonl_path]
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : str=("train",) ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ):
'''simple docstring'''
if split:
__lowerCAmelCase = {split: jsonl_path}
else:
__lowerCAmelCase = "train"
__lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path}
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return json.load(lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Tuple ):
'''simple docstring'''
    return [json.loads(line ) for line in buffer]
class UpperCAmelCase__ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase ) == 10
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
with pytest.raises(UpperCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
__lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
__lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write()
with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
assert exported_content == original_content
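# --- Editor's addition: hedged usage sketch ---
# The reader/writer classes exercised above back `Dataset.from_json` and
# `Dataset.to_json` in upstream `datasets`. Assuming that API, a JSON-lines
# round trip looks like:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ds.to_json("data.jsonl", lines=True)
#   reloaded = Dataset.from_json("data.jsonl")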
| 39
| 0
|
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase ( lowerCamelCase : List[str] ):
'''simple docstring'''
if not nums:
return 0
__lowerCAmelCase = nums[0]
__lowerCAmelCase = 0
for num in nums[1:]:
__lowerCAmelCase , __lowerCAmelCase = (
max_excluding + num,
max(UpperCamelCase__ , UpperCamelCase__ ),
)
return max(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
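# --- Editor's addition: worked example ---
# The loop keeps two running values: the best sum that includes the current
# number (previous "excluding" + num) and the best that excludes it (max of
# the previous pair). Tracing nums = [2, 7, 9, 3, 1], starting from
# including=2, excluding=0:
#
#   num=7: including=0+7=7,   excluding=max(2, 0)=2
#   num=9: including=2+9=11,  excluding=max(7, 2)=7
#   num=3: including=7+3=10,  excluding=max(11, 7)=11
#   num=1: including=11+1=12, excluding=max(10, 11)=11
#
# so the answer is max(12, 11) = 12, i.e. picking 2 + 9 + 1.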
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 39
| 0
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase : List[str] = ['''bert-base-uncased''', '''bert-base-cased''']
lowerCAmelCase : Optional[int] = '''hf-internal-testing/tiny-bert-tf-only'''
if is_tf_available():
class UpperCAmelCase__ ( tf.keras.Model ):
def __init__( self , UpperCamelCase ) -> str:
super().__init__()
__lowerCAmelCase = tokenizer
__lowerCAmelCase = AutoConfig.from_pretrained(lowercase_ )
__lowerCAmelCase = TFAutoModel.from_config(lowercase_ )
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = self.tokenizer(lowercase_ )
__lowerCAmelCase = self.bert(**lowercase_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Dict:
super().setUp()
__lowerCAmelCase = [
BertTokenizer.from_pretrained(lowercase_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
__lowerCAmelCase = [TFBertTokenizer.from_pretrained(lowercase_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(lowercase_ , use_fast_bert_tokenizer=lowercase_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowerCAmelCase = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
__lowerCAmelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
__lowerCAmelCase = tokenizer(lowercase_ , return_tensors="tf" , padding="longest" )
__lowerCAmelCase = tf_tokenizer(lowercase_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
__lowerCAmelCase = tf_tokenizer(self.paired_sentences )
__lowerCAmelCase = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
__lowerCAmelCase = tf.function(lowercase_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
__lowerCAmelCase = tf.constant(lowercase_ )
__lowerCAmelCase = compiled_tokenizer(lowercase_ )
__lowerCAmelCase = tf_tokenizer(lowercase_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
for tf_tokenizer in self.tf_tokenizers:
__lowerCAmelCase = ModelToSave(tokenizer=lowercase_ )
__lowerCAmelCase = tf.convert_to_tensor(self.test_sentences )
__lowerCAmelCase = model(lowercase_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowerCAmelCase = Path(lowercase_ ) / "saved.model"
model.save(lowercase_ )
__lowerCAmelCase = tf.keras.models.load_model(lowercase_ )
__lowerCAmelCase = loaded_model(lowercase_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
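# --- Editor's addition: hedged usage sketch ---
# `TFBertTokenizer` tokenises inside the TensorFlow graph, which is why the
# tests above can wrap it in `tf.function` and save it inside a Keras model.
# Assuming the upstream API:
#
#   import tensorflow as tf
#   from transformers import TFBertTokenizer
#   tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#   batch = tf_tokenizer(tf.constant(["a test sentence"]))  # dict of tf.Tensor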
| 703
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : List[str] = (CMStochasticIterativeScheduler,)
a : str = 1_0
def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str:
__lowerCAmelCase = {
"num_train_timesteps": 201,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
config.update(**UpperCamelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = 10
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = self.scheduler_classes[0](**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps[0]
__lowerCAmelCase = scheduler.timesteps[1]
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self ) -> Any:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = 1
scheduler.set_timesteps(UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCamelCase ):
# 1. scale model input
__lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# 2. predict noise residual
__lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
# 3. predict previous sample x_t-1
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [106, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# 2. predict noise residual
__lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
# 3. predict previous sample x_t-1
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCamelCase , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [39, 30, 12, 1, 0]
__lowerCAmelCase = len(UpperCamelCase )
with self.assertRaises(UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCamelCase , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
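# Hedged sketch of the sampling loop the tests above exercise, assuming the
# standard diffusers scheduler interface (set_timesteps / scale_model_input /
# step); the zero "noise prediction" is a stand-in for a real model call.
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = torch.zeros_like(model_input)  # stand-in for a real model
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample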
| 39
| 0
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase : str = logging.get_logger(__name__)
class UpperCAmelCase__ ( _UpperCAmelCase ):
a : Any = ['''audio_values''', '''audio_mask''']
def __init__( self , UpperCamelCase=2048 , UpperCamelCase=1 , UpperCamelCase=[16, 16] , UpperCamelCase=128 , UpperCamelCase=4_4100 , UpperCamelCase=86 , UpperCamelCase=2048 , UpperCamelCase=0.0 , **UpperCamelCase , ) -> Dict:
super().__init__(
feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , )
__lowerCAmelCase = spectrogram_length
__lowerCAmelCase = num_channels
__lowerCAmelCase = patch_size
__lowerCAmelCase = feature_size // self.patch_size[1]
__lowerCAmelCase = n_fft
__lowerCAmelCase = sampling_rate // hop_length_to_sampling_rate
__lowerCAmelCase = sampling_rate
__lowerCAmelCase = padding_value
__lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T
def UpperCAmelCase_ ( self , UpperCamelCase ) -> np.ndarray:
__lowerCAmelCase = spectrogram(
A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
__lowerCAmelCase = log_spec[:, :-1]
__lowerCAmelCase = log_spec - 20.0
__lowerCAmelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = False , **UpperCamelCase , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__lowerCAmelCase = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__lowerCAmelCase = is_batched_numpy or (
isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A_ , np.ndarray ):
__lowerCAmelCase = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowerCAmelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowerCAmelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A_ ):
__lowerCAmelCase = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowerCAmelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowerCAmelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowerCAmelCase = np.array(A_ ).astype(np.floataa )
# convert into correct format for padding
__lowerCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowerCAmelCase = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowerCAmelCase = padded_audio_features * self.padding_value
for i in range(len(A_ ) ):
__lowerCAmelCase = audio_features[i]
__lowerCAmelCase = feature
# return as BatchFeature
if return_attention_mask:
__lowerCAmelCase = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
__lowerCAmelCase = {"audio_values": padded_audio_features}
__lowerCAmelCase = BatchFeature(data=A_ , tensor_type=A_ )
return encoded_inputs
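# Hedged numpy-only sketch of the dB-scaled log-mel pipeline implemented above,
# assuming transformers.audio_utils is available; the constants mirror the
# extractor defaults (44.1 kHz, n_fft=2048, 128 mel bins, 86 frames per second).
import numpy as np
from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

sampling_rate, n_fft, feature_size = 44100, 2048, 128
hop_length = sampling_rate // 86
filters = mel_filter_bank(
    num_frequency_bins=1 + n_fft // 2,
    num_mel_filters=feature_size,
    min_frequency=0.0,
    max_frequency=22050.0,
    sampling_rate=sampling_rate,
    norm="slaney",
    mel_scale="slaney",
)
waveform = np.random.randn(sampling_rate).astype(np.float32)  # one second of noise
log_spec = spectrogram(
    waveform,
    window_function(n_fft, "hann"),
    frame_length=n_fft,
    hop_length=hop_length,
    power=2.0,
    mel_filters=filters,
    log_mel="dB",
    db_range=80.0,
)
log_spec = np.clip((log_spec[:, :-1] - 20.0) / 40.0, -2.0, 0.0) + 1.0  # same normalization as above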
| 704
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 39
| 0
|
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCAmelCase__ :
a : List[Any] = None
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = os.path.join(UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(UpperCamelCase )
__lowerCAmelCase = self.feature_extraction_class.from_json_file(UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = feat_extract_first.save_pretrained(UpperCamelCase )[0]
check_json_file_has_correct_format(UpperCamelCase )
__lowerCAmelCase = self.feature_extraction_class.from_pretrained(UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase )
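# Hedged sketch of the JSON round-trip the common tests above verify, using a
# concrete feature extractor (assumes transformers with Wav2Vec2 is installed).
import os
import tempfile
from transformers import Wav2Vec2FeatureExtractor

feat_extract = Wav2Vec2FeatureExtractor()
with tempfile.TemporaryDirectory() as tmpdirname:
    json_path = os.path.join(tmpdirname, "feat_extract.json")
    feat_extract.to_json_file(json_path)
    reloaded = Wav2Vec2FeatureExtractor.from_json_file(json_path)
assert reloaded.to_dict() == feat_extract.to_dict()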
| 705
|
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    '''simple docstring'''
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    '''simple docstring'''
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    '''simple docstring'''
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    '''simple docstring'''
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    '''simple docstring'''
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    '''simple docstring'''
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    '''simple docstring'''
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    '''simple docstring'''
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        # early exit kept from the source: square inputs are returned unmultiplied
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
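# Hedged sanity check on a non-square pair, verified by hand:
# (2x3) @ (3x2) -> [[58, 64], [139, 154]]. Note that strassen() pads its input
# lists in place, so fresh literals are passed directly.
assert strassen([[1, 2, 3], [4, 5, 6]], [[7, 8], [9, 10], [11, 12]]) == [[58, 64], [139, 154]]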
| 39
| 0
|
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class UpperCAmelCase__ ( UpperCamelCase__ ):
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = 8
# DPR tok
__lowerCAmelCase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__lowerCAmelCase = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
__lowerCAmelCase = os.path.join(__UpperCamelCase , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
__lowerCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__lowerCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
__lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__lowerCAmelCase = {"unk_token": "<unk>"}
__lowerCAmelCase = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
__lowerCAmelCase = os.path.join(__UpperCamelCase , BART_VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(__UpperCamelCase , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCamelCase ) )
def UpperCAmelCase_ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase_ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase_ ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = self.get_dummy_dataset()
__lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
__lowerCAmelCase = dataset
__lowerCAmelCase = RagRetriever(
__UpperCamelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
__lowerCAmelCase = self.get_dummy_dataset()
__lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
__lowerCAmelCase = os.path.join(self.tmpdirname , "dataset" )
__lowerCAmelCase = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
__lowerCAmelCase = RagRetriever(
__UpperCamelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowerCAmelCase = RagRetriever(
__UpperCamelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCamelCase ) , )
return retriever
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCAmelCase = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
__lowerCAmelCase = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
__lowerCAmelCase = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(__UpperCamelCase , open(__UpperCamelCase , "wb" ) )
__lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
__lowerCAmelCase = RagRetriever(
__UpperCamelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = retriever.retrieve(__UpperCamelCase , n_docs=__UpperCamelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCamelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __UpperCamelCase )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
__lowerCAmelCase = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCamelCase )
__lowerCAmelCase = RagRetriever.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever.retrieve(__UpperCamelCase , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCamelCase )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = retriever.retrieve(__UpperCamelCase , n_docs=__UpperCamelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCamelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __UpperCamelCase )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCamelCase )
__lowerCAmelCase = RagRetriever.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever.retrieve(__UpperCamelCase , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCamelCase )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = retriever.retrieve(__UpperCamelCase , n_docs=__UpperCamelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCamelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __UpperCamelCase )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCamelCase )
__lowerCAmelCase = RagRetriever.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever.retrieve(__UpperCamelCase , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_legacy_index_retriever()
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = retriever.retrieve(__UpperCamelCase , n_docs=__UpperCamelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCamelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , __UpperCamelCase )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCamelCase )
__lowerCAmelCase = RagRetriever.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever.retrieve(__UpperCamelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ) -> str:
import torch
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCAmelCase = [[5, 7], [10, 11]]
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever(__UpperCamelCase , __UpperCamelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , np.ndarray )
__lowerCAmelCase = retriever(
__UpperCamelCase , __UpperCamelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCamelCase , return_tensors="pt" , )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
__lowerCAmelCase = 1
__lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCamelCase )
retriever.set_ctx_encoder_tokenizer(__UpperCamelCase )
__lowerCAmelCase = [[5, 7], [10, 11]]
__lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCAmelCase = retriever(__UpperCamelCase , __UpperCamelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCamelCase )
self.assertEqual(
            len(__UpperCamelCase ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , __UpperCamelCase ) # check for doc token related keys in dictionary.
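# Hedged sketch of the maximum-inner-product retrieval the tests above rely on,
# reduced to a bare faiss index (assumes faiss and numpy are installed).
import numpy as np
import faiss

retrieval_vector_size = 8
docs = np.stack([np.ones(retrieval_vector_size), 2 * np.ones(retrieval_vector_size)]).astype("float32")
index = faiss.IndexFlatIP(retrieval_vector_size)
index.add(docs)
queries = np.stack([np.ones(retrieval_vector_size), -np.ones(retrieval_vector_size)]).astype("float32")
_, doc_ids = index.search(queries, 1)
assert doc_ids.tolist() == [[1], [0]]  # second doc maximizes IP for query 0, first doc for query 1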
| 706
|
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCAmelCase : Optional[Any] = '''scheduler_config.json'''
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = 1
a : Optional[int] = 2
a : int = 3
a : Union[str, Any] = 4
a : int = 5
a : Optional[int] = 6
a : str = 7
a : List[Any] = 8
a : List[str] = 9
a : List[str] = 1_0
a : int = 1_1
a : Any = 1_2
a : Any = 1_3
a : Tuple = 1_4
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : torch.FloatTensor
class UpperCAmelCase__ :
a : Tuple = SCHEDULER_CONFIG_NAME
a : Union[str, Any] = []
a : str = True
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , )
return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict:
self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase )
@property
def UpperCAmelCase_ ( self ) -> str:
return self._get_compatibles()
@classmethod
def UpperCAmelCase_ ( cls ) -> Tuple:
__lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
__lowerCAmelCase = importlib.import_module(__name__.split("." )[0] )
__lowerCAmelCase = [
getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase )
]
return compatible_classes
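# Hedged usage sketch of the compatibles machinery above (assumes diffusers is
# installed): any scheduler lists the scheduler classes it can be swapped with.
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
print([cls.__name__ for cls in scheduler.compatibles])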
| 39
| 0
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Optional[int] = logging.get_logger()
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : LevitConfig , lowerCamelCase : Path , lowerCamelCase : bool = True ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__lowerCAmelCase = timm.create_model("levit_128s" , pretrained=lowerCamelCase )
else:
__lowerCAmelCase = timm.create_model("levit_128" , pretrained=lowerCamelCase )
if hidden_sizes == 1_92:
__lowerCAmelCase = timm.create_model("levit_192" , pretrained=lowerCamelCase )
if hidden_sizes == 2_56:
__lowerCAmelCase = timm.create_model("levit_256" , pretrained=lowerCamelCase )
if hidden_sizes == 3_84:
__lowerCAmelCase = timm.create_model("levit_384" , pretrained=lowerCamelCase )
from_model.eval()
__lowerCAmelCase = LevitForImageClassificationWithTeacher(lowerCamelCase ).eval()
__lowerCAmelCase = OrderedDict()
__lowerCAmelCase = from_model.state_dict()
__lowerCAmelCase = list(from_model.state_dict().keys() )
__lowerCAmelCase = list(our_model.state_dict().keys() )
print(len(lowerCamelCase ) , len(lowerCamelCase ) )
for i in range(len(lowerCamelCase ) ):
__lowerCAmelCase = weights[og_keys[i]]
our_model.load_state_dict(lowerCamelCase )
__lowerCAmelCase = torch.randn((2, 3, 2_24, 2_24) )
__lowerCAmelCase = from_model(lowerCamelCase )
__lowerCAmelCase = our_model(lowerCamelCase ).logits
assert torch.allclose(lowerCamelCase , lowerCamelCase ), "The model logits don't match the original one."
__lowerCAmelCase = name
print(lowerCamelCase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__lowerCAmelCase = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'''Pushed {checkpoint_name}''' )
def __lowerCAmelCase ( lowerCamelCase : Path , lowerCamelCase : str = None , lowerCamelCase : bool = True ):
'''simple docstring'''
__lowerCAmelCase = """imagenet-1k-id2label.json"""
__lowerCAmelCase = 10_00
__lowerCAmelCase = (1, num_labels)
__lowerCAmelCase = """huggingface/label-files"""
__lowerCAmelCase = num_labels
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = partial(lowerCamelCase , num_labels=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase )
__lowerCAmelCase = {
"""levit-128S""": 1_28,
"""levit-128""": 1_28,
"""levit-192""": 1_92,
"""levit-256""": 2_56,
"""levit-384""": 3_84,
}
__lowerCAmelCase = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , lowerCamelCase , names_to_config[model_name] , lowerCamelCase , lowerCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
lowerCAmelCase : Union[str, Any] = parser.parse_args()
lowerCAmelCase : Optional[int] = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
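# Hedged sketch of the positional state-dict transfer used in the conversion
# above: weights are copied from one model to another purely by key order
# (assumes torch; the two Linear layers are hypothetical stand-ins).
from collections import OrderedDict
import torch.nn as nn

src, dst = nn.Linear(4, 2), nn.Linear(4, 2)
src_weights = src.state_dict()
new_state = OrderedDict(
    (dst_key, src_weights[src_key])
    for src_key, dst_key in zip(src_weights.keys(), dst.state_dict().keys())
)
dst.load_state_dict(new_state)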
| 707
|
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase : List[Any] = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase = None ) -> Union[str, Any]:
__lowerCAmelCase = (
os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__lowerCAmelCase = Extractor
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
__lowerCAmelCase = os.path.abspath(UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool:
return force_extract or (
not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase ))
)
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str:
__lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase )
if not extractor_format:
return input_path
__lowerCAmelCase = self._get_output_path(UpperCamelCase )
if self._do_extract(UpperCamelCase , UpperCamelCase ):
self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return output_path
class UpperCAmelCase__ ( UpperCamelCase__ ):
@classmethod
@abstractmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
...
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
...
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
a : List[bytes] = []
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
with open(UpperCamelCase , "rb" ) as f:
return f.read(UpperCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if not magic_number:
__lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
__lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( UpperCamelCase__ ):
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
return tarfile.is_tarfile(UpperCamelCase )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
def resolved(UpperCamelCase ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase ) )
def badpath(UpperCamelCase , UpperCamelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase )
def badlink(UpperCamelCase , UpperCamelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
__lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase )
__lowerCAmelCase = resolved(UpperCamelCase )
for finfo in members:
if badpath(finfo.name , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = tarfile.open(UpperCamelCase )
tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) )
tar_file.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x1F\x8B"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with gzip.open(UpperCamelCase , "rb" ) as gzip_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : List[Any] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase , "rb" ) as fp:
__lowerCAmelCase = _EndRecData(UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be
if len(UpperCamelCase ) == sizeCentralDir:
__lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file:
zip_file.extractall(UpperCamelCase )
zip_file.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with lzma.open(UpperCamelCase ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = rarfile.RarFile(UpperCamelCase )
rf.extractall(UpperCamelCase )
rf.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : int = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
__lowerCAmelCase = zstd.ZstdDecompressor()
with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh:
dctx.copy_stream(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
        with bz2.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
        import py7zr
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
        with py7zr.SevenZipFile(UpperCamelCase , "r" ) as archive:
archive.extractall(UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
        import lz4.frame
        with lz4.frame.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
a : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCAmelCase_ ( cls ) -> Optional[Any]:
return max(
len(UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase , UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase )
except OSError:
return b""
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/>
__lowerCAmelCase = cls._get_magic_number_max_length()
__lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return extractor_format
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase )
# Prevent parallel extractions
__lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) )
with FileLock(UpperCamelCase ):
shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format
else:
__lowerCAmelCase = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase , UpperCamelCase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase ):
return extractor.extract(UpperCamelCase , UpperCamelCase )
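# Hedged sketch (hypothetical helper, not part of datasets) of the magic-number
# sniffing behind infer_extractor_format: read the file header once and match
# it against known signatures.
from typing import Optional

MAGIC_NUMBERS = {"gzip": [b"\x1f\x8b"], "zip": [b"PK\x03\x04"], "bz2": [b"\x42\x5a\x68"]}

def sniff_format(path: str) -> Optional[str]:
    max_len = max(len(m) for magics in MAGIC_NUMBERS.values() for m in magics)
    with open(path, "rb") as f:
        head = f.read(max_len)
    for fmt, magics in MAGIC_NUMBERS.items():
        if any(head.startswith(m) for m in magics):
            return fmt
    return None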
| 39
| 0
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase : Optional[Any] = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase : Optional[Any] = {
'''t5-small''': 5_1_2,
'''t5-base''': 5_1_2,
'''t5-large''': 5_1_2,
'''t5-3b''': 5_1_2,
'''t5-11b''': 5_1_2,
}
lowerCAmelCase : str = '''▁'''
class UpperCAmelCase__ ( __A ):
a : int = VOCAB_FILES_NAMES
a : str = PRETRAINED_VOCAB_FILES_MAP
a : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase , UpperCamelCase="</s>" , UpperCamelCase="<unk>" , UpperCamelCase="<pad>" , UpperCamelCase=100 , UpperCamelCase=None , UpperCamelCase = None , UpperCamelCase=True , **UpperCamelCase , ) -> List[Any]:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__lowerCAmelCase = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__lowerCAmelCase = len(set(filter(lambda UpperCamelCase : bool("extra_id" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
__lowerCAmelCase = legacy
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCamelCase__ , **UpperCamelCase__ , )
__lowerCAmelCase = vocab_file
__lowerCAmelCase = extra_ids
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
__lowerCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , UpperCamelCase__ , )
return max_model_length
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text, **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
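
# Illustrative usage sketch, added for clarity (not part of the original file). It assumes a
# tokenizer instance `tok` of the class above, loaded from a real SentencePiece vocab file;
# the variable names are hypothetical.
#
#   ids = tok.build_inputs_with_special_tokens(tok.convert_tokens_to_ids(tok.tokenize("hello world")))
#   assert ids[-1] == tok.eos_token_id   # _add_eos_if_not_present appended EOS exactly once
#   mask = tok.get_special_tokens_mask(ids, already_has_special_tokens=True)
#   # -> zeros over the text tokens, with a 1 marking the trailing EOS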
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
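
# Illustrative usage sketch (added; the token ids are made up):
#
#   constraint = PhrasalConstraint([5, 9, 2])
#   constraint.advance()   # -> 5, the next token that makes progress
#   constraint.update(5)   # -> (True, False, False): stepped, not completed, no reset
#   constraint.update(9)
#   constraint.update(2)   # -> (True, True, False): the full phrase 5 9 2 was generated
#   constraint.remaining() # -> 0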
class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        """A simple trie of token ids used to check which tokens can come next."""
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the tokens in `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether the number of leaves equals the number of words; otherwise some word is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
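
# Illustrative example (added): for nested_token_ids = [[1, 2, 3], [1, 2, 4]] the trie is
# {1: {2: {3: {}, 4: {}}}}, so next_tokens([1, 2]) returns [3, 4] and reached_leaf([1, 2, 3])
# is True. count_leaves equals the number of words (2 here); if one word were a prefix of
# another, count_leaves would be smaller and has_subsets would flag it.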
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually touch the self.constraints
        # objects throughout this process, so they are still in their initialization state
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
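
# Illustrative usage sketch (added; the ids are made up):
#
#   state = ConstraintListState([PhrasalConstraint([5, 9]), DisjunctiveConstraint([[3], [4, 7]])])
#   state.advance()   # -> [5, 3, 4]: tokens that would start fulfilling some constraint
#   state.add(5)      # steps the phrasal constraint; it becomes the in-progress constraint
#   state.advance()   # -> [9]: only completing the in-progress constraint counts now
#   state.add(9); state.add(3)
#   state.completed   # -> True once every constraint is fulfilled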
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: operands go on the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go on the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing parenthesis applies the top operator to the top two operands
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()
if __name__ == "__main__":
lowerCAmelCase : Optional[int] = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
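
# Worked trace (added for illustration) on "(5 + ((4 * 2) * (2 + 3)))":
#   (4 * 2)  -> ")" pops *, 2, 4 and pushes 8
#   (2 + 3)  -> ")" pops +, 3, 2 and pushes 5
#   (8 * 5)  -> ")" pops *, 5, 8 and pushes 40
#   (5 + 40) -> ")" pops +, 40, 5 and pushes 45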
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint, so skip them
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    """Rename DALL-E encoder keys to the FlavaImageCodebook naming scheme."""
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
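
# Illustrative example (added): a DALL-E encoder key such as
#   "blocks.group_1.0.res_path.1.w"
# would be upgraded to
#   "blocks.group_1.group.0.res_path.path.1.weight"
# (group prefix expanded, res_path nested, and trailing .w/.b mapped to .weight/.bias).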
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Copy/paste/tweak the DALL-E encoder weights into the transformers design."""
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase : List[str] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
lowerCAmelCase : Any = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
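
# Illustrative shape sketch (added): with a batch of 2 examples and 4 choices each, the 2 x 4
# feature dicts are flattened into 8 rows for tokenizer.pad, then viewed back as tensors of
# shape (2, 4, seq_len); "labels" becomes a (2,)-shaped int64 tensor holding the correct choice.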
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
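
# Illustrative example (added; the sentences are made up): for one SWAG-style example,
# preprocess_function pairs the shared context with each candidate ending before tokenization:
#   sent1 = "A man is sitting on a roof."   sent2 = "He"
#   pairs -> ("A man is sitting on a roof.", "He <ending0>"), ..., ("...", "He <ending3>")
# so each example yields 4 (context, continuation) rows and the model scores all 4 choices.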
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
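
# Note (added): with this _LazyModule pattern, importing the package is cheap; heavy submodules
# such as modeling_albert are only imported when a name listed in _import_structure is first
# accessed, e.g. `from transformers.models.albert import AlbertModel` triggers the torch-backed
# import at attribute-access time rather than at package import time.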
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter: requesting it raises the stored error."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
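
# Illustrative usage sketch (added):
#
#   get_formatter("np")    # alias "np" resolves to "numpy" -> NumpyFormatter()
#   get_formatter(None)    # -> PythonFormatter(), the default plain-python formatter
#   get_formatter("pt")    # -> TorchFormatter() if torch is installed, otherwise raises the
#                          #    ValueError stored in _FORMAT_TYPES_ALIASES_UNAVAILABLE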
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
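
# Illustrative usage sketch (added): this builder backs `load_dataset("pandas", ...)` and reads
# pickled pandas DataFrames, e.g.:
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files={"train": "train.pkl"})   # hypothetical file name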
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
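
# Illustrative example (added): applying rename_key to
#   "layers.0.blocks.1.modulation.f.weight"
# yields
#   "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"
# (stage/block renames, modulation projections mapped, and the "focalnet." prefix added).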
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
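# Illustrative sanity check (added; the sample keys are assumed examples of the
# original FocalNet checkpoint layout, exercising the main rename branches above).
def _demo_rename_key():
    assert rename_key("patch_embed.proj.weight") == "focalnet.embeddings.patch_embeddings.projection.weight"
    assert rename_key("layers.0.blocks.1.modulation.f.weight") == "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"
    assert rename_key("head.weight") == "classifier.weight"  # the classification head keeps no "focalnet." prefix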
| 39
| 0
|
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = '''base_with_context'''
def load_notes_encoder( weights , model ):
'''simple docstring'''
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCAmelCase__ )
for lyr_num, lyr in enumerate(model.encoders ):
__lowerCAmelCase = weights[f'''layers_{lyr_num}''']
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
__lowerCAmelCase = ly_weight["attention"]
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder( weights , model ):
'''simple docstring'''
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCAmelCase__ )
for lyr_num, lyr in enumerate(model.encoders ):
__lowerCAmelCase = weights[f'''layers_{lyr_num}''']
__lowerCAmelCase = ly_weight["attention"]
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder( weights , model ):
'''simple docstring'''
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCAmelCase__ )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__lowerCAmelCase = weights[f'''layers_{lyr_num}''']
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
__lowerCAmelCase = ly_weight["self_attention"]
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__lowerCAmelCase = ly_weight["MultiHeadDotProductAttention_0"]
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
__lowerCAmelCase = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main( args ):
    '''simple docstring'''
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
args = parser.parse_args()
main(args)
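# Illustrative sketch (added; not part of the original script): every Flax "kernel"
# above is transposed with .T before being wrapped in nn.Parameter, because Flax
# Dense kernels are stored as (in_features, out_features) while
# torch.nn.Linear.weight is (out_features, in_features).
def _port_dense_kernel_demo(kernel, linear: nn.Linear) -> None:
    assert kernel.shape == (linear.in_features, linear.out_features)
    linear.weight = nn.Parameter(torch.FloatTensor(kernel.T))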
| 713
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 5_1_2,
'''squeezebert/squeezebert-mnli''': 5_1_2,
'''squeezebert/squeezebert-mnli-headless''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
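# Minimal usage sketch (added; assumes network access to the Hugging Face Hub and the
# checkpoint names listed in the maps above):
# tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
# tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
#   -> [cls_id, 5, 6, sep_id, 7, 8, sep_id]
# tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])
#   -> [0, 0, 0, 0, 1, 1, 1]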
| 39
| 0
|
'''simple docstring'''
def gcd( a: int , b: int ) -> int:
    '''simple docstring'''
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse( a: int , m: int ) -> int:
    '''simple docstring'''
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
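# Worked example (added for illustration): the inverse of 7 modulo 26 is 15,
# since 7 * 15 = 105 = 4 * 26 + 1.
assert find_mod_inverse(7, 26) == 15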
| 714
|
'''simple docstring'''
from __future__ import annotations
def average( nums: list ) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
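# Example (added for illustration): the mean of [1, 2, 3, 4] is 2.5.
assert average([1, 2, 3, 4]) == 2.5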
| 39
| 0
|
'''simple docstring'''
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__( self ):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__( self ):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple( self ):
        return self.major, self.minor, self.patch

    def _validate_operand( self , other ):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__( self , other ):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__( self , other ):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__( self ):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict( cls , dic ):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string( self ) -> str:
        return self.version_str


def _str_to_version_tuple( version_str ):
    '''simple docstring'''
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str( version_tuple ):
    '''simple docstring'''
    return ".".join(str(v) for v in version_tuple)
| 715
|
'''simple docstring'''
import re
def is_sri_lankan_phone_number( phone: str ) -> bool:
    '''simple docstring'''
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = '''0094702343221'''
    print(is_sri_lankan_phone_number(phone))
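# A few illustrative checks (added): the pattern accepts 0 / 94 / +94 / 0094 prefixes,
# an operator code 7x (x in 0-2, 4-8), an optional separator, and seven digits.
assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("0718382399")
assert not is_sri_lankan_phone_number("0093702343221")  # wrong country prefix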
| 39
| 0
|
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , albert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
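# Example invocation (added; the script filename and paths are illustrative only):
# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin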
| 716
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
blip_test = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = {"BertModelTest": "BertModelTester"}
__lowerCAmelCase = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
__lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
__lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
__lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
__lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
| 39
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__lowerCAmelCase = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowerCamelCase__ , cache_dir=lowerCamelCase__ )
__lowerCAmelCase = [t[-1] for t in os.walk(os.path.join(lowerCamelCase__ , os.listdir(lowerCamelCase__ )[0] , "snapshots" ) )]
__lowerCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowerCamelCase__ )
__lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = 4
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
__lowerCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase__ ) == num_samples
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowerCamelCase__ )
__lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = 50
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase__ )
__lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = 50
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
__lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = 50
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , )
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , )
__lowerCAmelCase = scheduler.create_state()
__lowerCAmelCase = scheduler_state
__lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = 50
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
# shard inputs and rng
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = jax.random.split(lowerCamelCase__ , lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase__ , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__lowerCAmelCase = jax.device_count()
__lowerCAmelCase = num_samples * [prompt]
__lowerCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , lowerCamelCase__ )
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase__ , )
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
__lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase__ , use_memory_efficient_attention=lowerCamelCase__ , )
__lowerCAmelCase = replicate(lowerCamelCase__ )
__lowerCAmelCase = pipeline.prepare_inputs(lowerCamelCase__ )
__lowerCAmelCase = shard(lowerCamelCase__ )
__lowerCAmelCase = pipeline(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , jit=lowerCamelCase__ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
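# A minimal sketch (added) of the replicate/shard pattern used in the tests above:
# parameters are replicated onto every local device, the batch gains a leading
# device axis, and each device gets its own PRNG key (shapes are illustrative).
def _pmap_input_demo():
    num_devices = jax.device_count()
    params = {"w": np.ones((3,), dtype=np.float32)}
    replicated_params = replicate(params)  # each leaf gains a leading axis of size num_devices
    batch = np.zeros((num_devices * 2, 4), dtype=np.float32)
    sharded_batch = shard(batch)  # (num_devices * 2, 4) -> (num_devices, 2, 4)
    rngs = jax.random.split(jax.random.PRNGKey(0), num_devices)  # one key per device
    return replicated_params, sharded_batch, rngs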
| 717
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput( BaseOutput ):
    sample: torch.FloatTensor


class TransformerTemporalModel( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , UpperCamelCase = 16 , UpperCamelCase = 88 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = 32 , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = "geglu" , UpperCamelCase = True , UpperCamelCase = True , ) -> List[str]:
super().__init__()
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = attention_head_dim
__lowerCAmelCase = num_attention_heads * attention_head_dim
__lowerCAmelCase = in_channels
__lowerCAmelCase = torch.nn.GroupNorm(num_groups=UpperCamelCase , num_channels=UpperCamelCase , eps=1E-6 , affine=UpperCamelCase )
__lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase )
# 3. Define transformers blocks
__lowerCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , cross_attention_dim=UpperCamelCase , activation_fn=UpperCamelCase , attention_bias=UpperCamelCase , double_self_attention=UpperCamelCase , norm_elementwise_affine=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
__lowerCAmelCase = nn.Linear(UpperCamelCase , UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=1 , UpperCamelCase=None , UpperCamelCase = True , ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = hidden_states.shape
__lowerCAmelCase = batch_frames // num_frames
__lowerCAmelCase = hidden_states
__lowerCAmelCase = hidden_states[None, :].reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__lowerCAmelCase = self.norm(UpperCamelCase )
__lowerCAmelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = self.proj_in(UpperCamelCase )
# 2. Blocks
for block in self.transformer_blocks:
__lowerCAmelCase = block(
UpperCamelCase , encoder_hidden_states=UpperCamelCase , timestep=UpperCamelCase , cross_attention_kwargs=UpperCamelCase , class_labels=UpperCamelCase , )
# 3. Output
__lowerCAmelCase = self.proj_out(UpperCamelCase )
__lowerCAmelCase = (
hidden_states[None, None, :]
.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__lowerCAmelCase = hidden_states.reshape(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCamelCase )
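# Shape walkthrough (added; the numbers are illustrative) of the temporal-attention
# trick above: frames are folded out of the batch so attention runs over the time
# axis independently at every spatial location.
def _temporal_reshape_demo():
    batch_size, num_frames, channel, height, width = 2, 4, 8, 16, 16
    x = torch.randn(batch_size * num_frames, channel, height, width)
    x = x[None, :].reshape(batch_size, num_frames, channel, height, width)
    x = x.permute(0, 2, 1, 3, 4)  # (batch, channel, frames, height, width)
    x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
    assert x.shape == (batch_size * height * width, num_frames, channel)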
| 39
| 0
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption( parser ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary( terminalreporter ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 718
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload: bytes , sampling_rate: int ) -> np.ndarray:
    '''simple docstring'''
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : float , lowerCamelCase : str = "f32le" , ):
'''simple docstring'''
__lowerCAmelCase = f'''{sampling_rate}'''
__lowerCAmelCase = "1"
if format_for_conversion == "s16le":
__lowerCAmelCase = 2
elif format_for_conversion == "f32le":
__lowerCAmelCase = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
__lowerCAmelCase = platform.system()
if system == "Linux":
__lowerCAmelCase = "alsa"
__lowerCAmelCase = "default"
elif system == "Darwin":
__lowerCAmelCase = "avfoundation"
__lowerCAmelCase = ":0"
elif system == "Windows":
__lowerCAmelCase = "dshow"
__lowerCAmelCase = "default"
__lowerCAmelCase = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
__lowerCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowerCAmelCase = _ffmpeg_stream(lowerCamelCase , lowerCamelCase )
for item in iterator:
yield item
def ffmpeg_microphone_live( sampling_rate: int , chunk_length_s: float , stream_chunk_s: Optional[int] = None , stride_length_s: Optional[Union[Tuple[float, float], float]] = None , format_for_conversion: str = "f32le" , ):
    '''simple docstring'''
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter( iterator , chunk_len: int , stride: Tuple[int, int] , stream: bool = False ):
    '''simple docstring'''
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream( ffmpeg_command , buflen: int ):
    '''simple docstring'''
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 39
| 0
|
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('''covid_data''', '''cases deaths recovered''')
def __lowerCAmelCase ( lowerCamelCase : Optional[int] = "https://www.worldometers.info/coronavirus/" ):
'''simple docstring'''
__lowerCAmelCase = "//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(UpperCamelCase__ ).content ).xpath(UpperCamelCase__ ) )
lowerCAmelCase : List[str] = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 719
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory( args ):
    '''simple docstring'''
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__( self , model: str , cache: str , force: bool , trust_remote_code: bool ):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run( self ):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
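# Example invocation through the transformers CLI (added; the model name is illustrative):
# transformers-cli download --cache-dir ./models bert-base-uncased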
| 39
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : str=1_00 , lowerCamelCase : Any=" " ):
'''simple docstring'''
__lowerCAmelCase = text.split(lowerCamelCase__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowerCamelCase__ ) , lowerCamelCase__ )]
def __lowerCAmelCase ( lowerCamelCase : int ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(lowerCamelCase__ ):
titles.append(title if title is not None else "" )
texts.append(lowerCamelCase__ )
return {"title": titles, "text": texts}
def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=lowerCamelCase__ , padding="longest" , return_tensors="pt" )["input_ids"]
__lowerCAmelCase = ctx_encoder(input_ids.to(device=lowerCamelCase__ ) , return_dict=lowerCamelCase__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def main( rag_example_args , processing_args , index_hnsw_args ):
    '''simple docstring'''
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
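        # Illustrative follow-up (added; paths reuse the defaults above): reload the
        # passages and the Faiss index that main() just wrote, e.g. for a RAG retriever.
        # from datasets import load_from_disk
        # dataset = load_from_disk(os.path.join(rag_example_args.output_dir, "my_knowledge_dataset"))
        # dataset.load_faiss_index("embeddings", os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss"))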
| 720
|
'''simple docstring'''
def count_divisors( n: int ) -> int:
    '''simple docstring'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    '''simple docstring'''
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
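# Quick worked check of count_divisors (added for illustration): 28 = 2**2 * 7,
# so it has (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6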
| 39
| 0
|
'''simple docstring'''
B64_CHARSET = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''


def base64_encode( data: bytes ) -> bytes:
    '''simple docstring'''
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode( encoded_data: str ) -> bytes:
    '''simple docstring'''
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
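# Round-trip example (added for illustration): "Python" encodes to "UHl0aG9u",
# matching the standard library's base64 module; six input bytes need no padding.
if __name__ == "__main__":
    import base64 as stdlib_base64

    assert base64_encode(b"Python") == stdlib_base64.b64encode(b"Python")
    assert base64_decode("UHl0aG9u") == b"Python"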
| 721
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Optional[Any] = """dpr"""
def __init__( self , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1E-12 , UpperCamelCase=0 , UpperCamelCase="absolute" , UpperCamelCase = 0 , **UpperCamelCase , ) -> Tuple:
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = projection_dim
__lowerCAmelCase = position_embedding_type
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowerCAmelCase : Optional[int] = get_tests_dir('''fixtures''')
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Optional[int]:
# A mock response for an HTTP head request to emulate server down
__lowerCAmelCase = mock.Mock()
__lowerCAmelCase = 500
__lowerCAmelCase = {}
__lowerCAmelCase = HTTPError
__lowerCAmelCase = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=UpperCamelCase ) as mock_head:
__lowerCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
        # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase_ ( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def UpperCAmelCase_ ( self ) -> str:
with self.assertRaises(UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(UpperCamelCase )
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase_ ( cls ) -> int:
__lowerCAmelCase = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def UpperCAmelCase_ ( cls ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = ViTImageProcessor.from_pretrained(UpperCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
__lowerCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCamelCase , repo_id="test-image-processor" , push_to_hub=UpperCamelCase , use_auth_token=self._token )
__lowerCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase ) )
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = ViTImageProcessor.from_pretrained(UpperCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
__lowerCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=UpperCamelCase , use_auth_token=self._token )
__lowerCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase ) )
def UpperCAmelCase_ ( self ) -> int:
CustomImageProcessor.register_for_auto_class()
__lowerCAmelCase = CustomImageProcessor.from_pretrained(UpperCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
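# Push/pull pattern distilled from the tests above (added sketch; the repo id is
# a placeholder and a valid Hub token/login is assumed):
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
processor.push_to_hub("my-test-image-processor", use_auth_token="hf_...")  # placeholder token
reloaded = ViTImageProcessor.from_pretrained("<username>/my-test-image-processor")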
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase : Union[str, Any] = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Dict ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
__lowerCAmelCase = features.copy()
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
assert isinstance(lowerCamelCase , lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ):
'''simple docstring'''
if issubclass(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = jsonl_path
elif issubclass(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = [jsonl_path]
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_dataset(lowerCamelCase , lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : tuple=("train",) ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ):
'''simple docstring'''
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ):
'''simple docstring'''
if split:
__lowerCAmelCase = {split: jsonl_path}
else:
__lowerCAmelCase = "train"
__lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path}
__lowerCAmelCase = tmp_path / "cache"
__lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read()
_check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return json.load(lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Tuple ):
'''simple docstring'''
return [json.loads(lowerCamelCase ) for line in buffer]
class UpperCAmelCase__ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json_function(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
assert isinstance(exported_content[0] , UpperCamelCase )
assert len(UpperCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowerCAmelCase = load_json(UpperCamelCase )
assert isinstance(UpperCamelCase , UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase ) == 10
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
with pytest.raises(UpperCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
__lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
__lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write()
with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f:
__lowerCAmelCase = f.read()
assert exported_content == original_content
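# Reader/writer round-trip distilled from the tests above (added sketch;
# "data.jsonl" and "out.jsonl" are placeholder paths):
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

dataset = JsonDatasetReader("data.jsonl", cache_dir="cache").read()
with open("out.jsonl", "wb") as f:
    JsonDatasetWriter(dataset, f, lines=True).write()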
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Tuple = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
a : Tuple = XLMRobertaTokenizer
a : Any = XLMRobertaTokenizerFast
a : List[Any] = True
a : Tuple = True
def UpperCAmelCase_ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = XLMRobertaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(UpperCamelCase ) , 1002 )
def UpperCAmelCase_ ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = XLMRobertaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
__lowerCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowerCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowerCAmelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(UpperCamelCase )
__lowerCAmelCase = tokenizer_p.save_pretrained(UpperCamelCase )
            # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__lowerCAmelCase = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(UpperCamelCase , UpperCamelCase )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(UpperCamelCase )
__lowerCAmelCase = tokenizer_p.from_pretrained(UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase )
# Save tokenizer rust, legacy_format=True
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(UpperCamelCase , legacy_format=UpperCamelCase )
__lowerCAmelCase = tokenizer_p.save_pretrained(UpperCamelCase )
            # Checks it saves with the same files
self.assertSequenceEqual(UpperCamelCase , UpperCamelCase )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(UpperCamelCase )
__lowerCAmelCase = tokenizer_p.from_pretrained(UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) )
shutil.rmtree(UpperCamelCase )
# Save tokenizer rust, legacy_format=False
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(UpperCamelCase , legacy_format=UpperCamelCase )
__lowerCAmelCase = tokenizer_p.save_pretrained(UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(UpperCamelCase )
__lowerCAmelCase = tokenizer_p.from_pretrained(UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) )
shutil.rmtree(UpperCamelCase )
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCamelCase , f.name )
__lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=UpperCamelCase )
__lowerCAmelCase = pickle.dumps(UpperCamelCase )
pickle.loads(UpperCamelCase )
def UpperCAmelCase_ ( self ) -> int:
if not self.test_rust_tokenizer:
return
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.tokenize(UpperCamelCase )
__lowerCAmelCase = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
__lowerCAmelCase = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(UpperCamelCase )
__lowerCAmelCase = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = "Hello World!"
__lowerCAmelCase = [0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) )
@slow
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
__lowerCAmelCase = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# fmt: off
__lowerCAmelCase = {"input_ids": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
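# Encoding sketch (added), reusing the expected ids asserted in the first slow test above:
#
#     tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     assert tokenizer.encode("Hello World!") == [0, 35378, 6661, 38, 2]  # <s> ... </s>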
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Optional[Any] = """dpr"""
def __init__( self , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1E-12 , UpperCamelCase=0 , UpperCamelCase="absolute" , UpperCamelCase = 0 , **UpperCamelCase , ) -> Tuple:
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = projection_dim
__lowerCAmelCase = position_embedding_type
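# Instantiation sketch (added): the config above is BERT-shaped, so the defaults
# give hidden_size == 768, and projection_dim == 0 conventionally means no extra
# projection layer. The obfuscated class name is kept as-is here.
config = UpperCAmelCase__()
assert config.hidden_size == 768
assert config.projection_dim == 0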
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : List[str] = (CMStochasticIterativeScheduler,)
a : str = 1_0
def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str:
__lowerCAmelCase = {
"num_train_timesteps": 201,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
config.update(**UpperCamelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = 10
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = self.scheduler_classes[0](**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps[0]
__lowerCAmelCase = scheduler.timesteps[1]
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self ) -> Any:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = 1
scheduler.set_timesteps(UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCamelCase ):
# 1. scale model input
__lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# 2. predict noise residual
__lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
# 3. predict previous sample x_t-1
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [106, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase )
__lowerCAmelCase = scheduler.timesteps
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__lowerCAmelCase = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# 2. predict noise residual
__lowerCAmelCase = model(UpperCamelCase , UpperCamelCase )
# 3. predict previous sample x_t-1
__lowerCAmelCase = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCamelCase , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [39, 30, 12, 1, 0]
__lowerCAmelCase = len(UpperCamelCase )
with self.assertRaises(UpperCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCamelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
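# The denoising loop exercised by the tests above, distilled into a standalone
# sketch (added); `model` is a placeholder for a trained consistency model.
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
model = lambda x, t: torch.zeros_like(x)  # stand-in noise-residual predictor
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)  # 1. scale model input
    residual = model(model_input, t)  # 2. predict noise residual
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample  # 3. previous sample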
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
lowerCAmelCase : List[Any] = '''Hello world! cécé herlolip'''
lowerCAmelCase : Tuple = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int ):
'''simple docstring'''
__lowerCAmelCase = BertAbsConfig(
temp_dir="." , finetune_bert=lowerCamelCase , large=lowerCamelCase , share_emb=lowerCamelCase , use_bert_emb=lowerCamelCase , encoder="bert" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
    __lowerCAmelCase = torch.load(lowerCamelCase , map_location=lambda storage , loc : storage )
__lowerCAmelCase = AbsSummarizer(lowerCamelCase , torch.device("cpu" ) , lowerCamelCase )
original.eval()
__lowerCAmelCase = BertAbsSummarizer(lowerCamelCase , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
__lowerCAmelCase = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
__lowerCAmelCase = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase )) )
__lowerCAmelCase = torch.tensor(lowerCamelCase ).unsqueeze(0 )
__lowerCAmelCase = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase )) )
__lowerCAmelCase = torch.tensor(lowerCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__lowerCAmelCase = encoder_input_ids
__lowerCAmelCase = decoder_input_ids
__lowerCAmelCase = __lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = __lowerCAmelCase = None
__lowerCAmelCase = __lowerCAmelCase = None
__lowerCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__lowerCAmelCase = original(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )[0]
__lowerCAmelCase = original.generator(lowerCamelCase )
__lowerCAmelCase = new_model(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )[0]
__lowerCAmelCase = new_model.generator(lowerCamelCase )
__lowerCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase ) )
__lowerCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase ) )
__lowerCAmelCase = torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
lowerCAmelCase : Optional[int] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
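# Invocation sketch (added; the script filename and paths are placeholders):
#
#     python convert_bertabs_original_pytorch_checkpoint.py \
#         --bertabs_checkpoint_path ./bertabs_model.pt \
#         --pytorch_dump_folder_path ./bertabs-converted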
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __lowerCAmelCase ( lowerCamelCase : str = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
__lowerCAmelCase = BeautifulSoup(requests.get(lowerCamelCase ).text , "html.parser" )
__lowerCAmelCase = soup.findAll("h1" )
__lowerCAmelCase = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(lowerCamelCase , lowerCamelCase )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f'{key}\n{value}\n')
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
lowerCAmelCase : Any = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
lowerCAmelCase : Dict = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = ''' Hello world! cécé herlolip'''
lowerCAmelCase : Optional[Any] = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def __lowerCAmelCase ( lowerCamelCase : List[str] ):
'''simple docstring'''
__lowerCAmelCase = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowerCAmelCase = dct.pop(lowerCamelCase )
__lowerCAmelCase = val
def __lowerCAmelCase ( lowerCamelCase : str ):
'''simple docstring'''
__lowerCAmelCase = torch.load(lowerCamelCase , map_location="cpu" )
__lowerCAmelCase = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval()
hub_interface.model.load_state_dict(sd["model"] )
return hub_interface
def __lowerCAmelCase ( lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = emb.weight.shape
__lowerCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__lowerCAmelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def __lowerCAmelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
__lowerCAmelCase = torch.hub.load("pytorch/fairseq" , lowerCamelCase ).eval()
else:
__lowerCAmelCase = load_xsum_checkpoint(lowerCamelCase )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCAmelCase = checkpoint_path.replace("." , "-" )
__lowerCAmelCase = BartConfig.from_pretrained(lowerCamelCase )
__lowerCAmelCase = bart.encode(lowerCamelCase ).unsqueeze(0 )
__lowerCAmelCase = BartTokenizer.from_pretrained(lowerCamelCase ).encode(lowerCamelCase , return_tensors="pt" ).unsqueeze(0 )
if not torch.eq(lowerCamelCase , lowerCamelCase ).all():
raise ValueError(
f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
if checkpoint_path == "bart.large.mnli":
__lowerCAmelCase = bart.state_dict()
remove_ignore_keys_(lowerCamelCase )
__lowerCAmelCase = state_dict["model.decoder.embed_tokens.weight"]
for src, dest in mnli_rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowerCAmelCase = BartForSequenceClassification(lowerCamelCase ).eval()
model.load_state_dict(lowerCamelCase )
__lowerCAmelCase = bart.predict("mnli" , lowerCamelCase , return_logits=lowerCamelCase )
__lowerCAmelCase = model(lowerCamelCase )[0] # logits
else: # no classification heads to worry about
__lowerCAmelCase = bart.model.state_dict()
remove_ignore_keys_(lowerCamelCase )
__lowerCAmelCase = state_dict["decoder.embed_tokens.weight"]
__lowerCAmelCase = bart.extract_features(lowerCamelCase )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCAmelCase = BartModel(lowerCamelCase ).eval()
model.load_state_dict(lowerCamelCase )
__lowerCAmelCase = model(lowerCamelCase ).model[0]
else:
__lowerCAmelCase = BartForConditionalGeneration(lowerCamelCase ).eval() # an existing summarization ckpt
model.model.load_state_dict(lowerCamelCase )
if hasattr(lowerCamelCase , "lm_head" ):
__lowerCAmelCase = make_linear_from_emb(model.model.shared )
__lowerCAmelCase = model.model(lowerCamelCase )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
lowerCAmelCase : List[str] = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
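# Invocation sketch (added; the script filename, output path, and config name
# are placeholders):
#
#     python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#         bart.large.cnn ./bart-large-cnn-converted --hf_config facebook/bart-large-cnn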
'''simple docstring'''
from __future__ import annotations
import math
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
if len(lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception("Matrices are not 2x2" )
__lowerCAmelCase = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase ) )
]
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase ) )
]
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
if len(lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("Odd matrices are not supported!" )
__lowerCAmelCase = len(lowerCamelCase )
__lowerCAmelCase = matrix_length // 2
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase )]
__lowerCAmelCase = [
[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )
]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase )]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )]
return top_left, top_right, bot_left, bot_right
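# Illustrative split of a 4x4 matrix into its four 2x2 quadrants, in the order the
# return statement above names them (top_left, top_right, bot_left, bot_right):
#   split_matrix([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
#   -> ([[1, 2], [5, 6]], [[3, 4], [7, 8]], [[9, 10], [13, 14]], [[11, 12], [15, 16]])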
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
return len(lowerCamelCase ), len(matrix[0] )
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
print("\n".join(str(lowerCamelCase ) for line in matrix ) )
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
if matrix_dimensions(lowerCamelCase ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase , lowerCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase )
__lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
__lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase )
__lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase )
__lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase )
__lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase )
# construct the new matrix from our 4 quadrants
__lowerCAmelCase = []
for i in range(len(lowerCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
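# The seven recursive calls above are Strassen's trick: a classical block
# multiplication of 2x2-partitioned matrices needs eight sub-products, while
# Strassen's identities recombine only seven, which is what makes the overall
# recursion sub-cubic.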
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
if matrix_dimensions(lowerCamelCase )[1] != matrix_dimensions(lowerCamelCase )[0]:
__lowerCAmelCase = (
"Unable to multiply these matrices, please check the dimensions.\n"
f'''Matrix A: {matrixa}\n'''
f'''Matrix B: {matrixa}'''
)
raise Exception(lowerCamelCase )
__lowerCAmelCase = matrix_dimensions(lowerCamelCase )
__lowerCAmelCase = matrix_dimensions(lowerCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCAmelCase = max(*lowerCamelCase , *lowerCamelCase )
    __lowerCAmelCase = int(math.pow(2 , math.ceil(math.log2(lowerCamelCase ) ) ) )
__lowerCAmelCase = matrixa
__lowerCAmelCase = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__lowerCAmelCase = actual_strassen(lowerCamelCase , lowerCamelCase )
# Removing the additional zeros
for i in range(0 , lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
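# Padding sketch: multiplying a 2x3 matrix by a 3x2 one pads both operands with
# zeros up to 4x4 (the next power of two >= the largest dimension), runs the
# recursion on the padded squares, then strips the extra rows and columns, so
# actual_strassen only ever sees even-sized inputs. The resulting cost is
# O(n^log2(7)) ~ O(n^2.81) multiplications versus O(n^3) for the naive algorithm.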
if __name__ == "__main__":
lowerCAmelCase : Tuple = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowerCAmelCase : Any = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
lowerCAmelCase : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
a : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a : bool = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class UpperCAmelCase__ :
a : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : bool = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCAmelCase_ ( self ) -> Tuple:
if self.train_file is not None:
__lowerCAmelCase = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__lowerCAmelCase = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase__ :
a : PreTrainedTokenizerBase
a : Union[bool, str, PaddingStrategy] = True
a : Optional[int] = None
a : Optional[int] = None
def __call__( self , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = "label" if "label" in features[0].keys() else "labels"
__lowerCAmelCase = [feature.pop(UpperCamelCase ) for feature in features]
__lowerCAmelCase = len(UpperCamelCase )
__lowerCAmelCase = len(features[0]["input_ids"] )
__lowerCAmelCase = [
[{k: v[i] for k, v in feature.items()} for i in range(UpperCamelCase )] for feature in features
]
__lowerCAmelCase = list(chain(*UpperCamelCase ) )
__lowerCAmelCase = self.tokenizer.pad(
UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
__lowerCAmelCase = {k: v.view(UpperCamelCase , UpperCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
        __lowerCAmelCase = torch.tensor(UpperCamelCase , dtype=torch.int64 )
return batch
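# Shape sketch for the collator above (sizes are illustrative): a batch of B
# features, each carrying 4 candidate endings of length L, is flattened to
# (B * 4, L) so the tokenizer can pad everything in a single call, then reshaped
# back to (B, 4, L) with the labels re-attached as a (B,) int64 tensor.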
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__lowerCAmelCase = {}
if data_args.train_file is not None:
__lowerCAmelCase = data_args.train_file
if data_args.validation_file is not None:
__lowerCAmelCase = data_args.validation_file
__lowerCAmelCase = data_args.train_file.split("." )[-1]
__lowerCAmelCase = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__lowerCAmelCase = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__lowerCAmelCase = [f'''ending{i}''' for i in range(4 )]
__lowerCAmelCase = "sent1"
__lowerCAmelCase = "sent2"
if data_args.max_seq_length is None:
__lowerCAmelCase = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
__lowerCAmelCase = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
__lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase : Tuple ):
__lowerCAmelCase = [[context] * 4 for context in examples[context_name]]
__lowerCAmelCase = examples[question_header_name]
__lowerCAmelCase = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
__lowerCAmelCase = list(chain(*lowerCamelCase ) )
__lowerCAmelCase = list(chain(*lowerCamelCase ) )
# Tokenize
__lowerCAmelCase = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
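    # Flattening sketch (illustrative): with 2 examples, the repeated-context list
    # holds 2 * 4 = 8 entries and the endings list the matching 8 candidates; the
    # tokenizer therefore sees 8 sentence pairs, and the final comprehension regroups
    # every field back into chunks of 4 per original example.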
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
__lowerCAmelCase = raw_datasets["train"]
if data_args.max_train_samples is not None:
__lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_train_samples )
__lowerCAmelCase = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
__lowerCAmelCase = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
__lowerCAmelCase = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
__lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_eval_samples )
__lowerCAmelCase = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
__lowerCAmelCase = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__lowerCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(lowerCamelCase : Dict ):
__lowerCAmelCase , __lowerCAmelCase = eval_predictions
__lowerCAmelCase = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
__lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
__lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
__lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("train" , lowerCamelCase )
trainer.save_metrics("train" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__lowerCAmelCase = trainer.evaluate()
__lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
__lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("eval" , lowerCamelCase )
trainer.save_metrics("eval" , lowerCamelCase )
__lowerCAmelCase = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : Tuple ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
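# Illustrative command line (paths are hypothetical; the flags are standard
# HfArgumentParser/TrainingArguments options):
#   python run_swag.py --model_name_or_path bert-base-uncased \
#       --do_train --do_eval --output_dir /tmp/swag_out \
#       --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 3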
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCAmelCase : Optional[Any] = '''scheduler_config.json'''
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = 1
a : Optional[int] = 2
a : int = 3
a : Union[str, Any] = 4
a : int = 5
a : Optional[int] = 6
a : str = 7
a : List[Any] = 8
a : List[str] = 9
a : List[str] = 1_0
a : int = 1_1
a : Any = 1_2
a : Any = 1_3
a : Tuple = 1_4
@dataclass
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : torch.FloatTensor
class UpperCAmelCase__ :
a : Tuple = SCHEDULER_CONFIG_NAME
a : Union[str, Any] = []
a : str = True
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase=False , **UpperCamelCase , ) -> int:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , )
return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False , **UpperCamelCase ) -> Dict:
self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase )
@property
def UpperCAmelCase_ ( self ) -> str:
return self._get_compatibles()
@classmethod
def UpperCAmelCase_ ( cls ) -> Tuple:
__lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
__lowerCAmelCase = importlib.import_module(__name__.split("." )[0] )
__lowerCAmelCase = [
getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase )
]
return compatible_classes
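# Usage sketch (repo id and class name are hypothetical; the method names follow
# the diffusers scheduler API this mixin models):
#   scheduler = MyScheduler.from_pretrained("org/model", subfolder="scheduler")
#   scheduler.save_pretrained("./scheduler_out")
#   scheduler.compatibles  # concrete classes resolved from the _compatibles names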
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : List[Any] = """facebook/bart-large-mnli"""
a : str = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
a : int = """text_classifier"""
a : int = AutoTokenizer
a : Tuple = AutoModelForSequenceClassification
a : str = ["""text""", ["""text"""]]
a : Union[str, Any] = ["""text"""]
def UpperCAmelCase_ ( self ) -> str:
super().setup()
__lowerCAmelCase = self.model.config
__lowerCAmelCase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
__lowerCAmelCase = int(UpperCamelCase )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = labels
return self.pre_processor(
[text] * len(UpperCamelCase ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , )
def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]:
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
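# Usage sketch (instantiation is hypothetical): the tool turns each candidate label
# into the NLI hypothesis "This example is {label}", scores every (text, hypothesis)
# pair with the MNLI head, and returns the label whose entailment logit is largest.
#   tool = TextClassificationTool()
#   tool("I really enjoyed this film", labels=["positive", "negative"])  # -> "positive"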
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase : List[Any] = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase = None ) -> Union[str, Any]:
__lowerCAmelCase = (
os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__lowerCAmelCase = Extractor
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
__lowerCAmelCase = os.path.abspath(UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool:
return force_extract or (
not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase ))
)
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str:
__lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase )
if not extractor_format:
return input_path
__lowerCAmelCase = self._get_output_path(UpperCamelCase )
if self._do_extract(UpperCamelCase , UpperCamelCase ):
self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return output_path
class UpperCAmelCase__ ( UpperCamelCase__ ):
@classmethod
@abstractmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
...
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
...
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
a : List[bytes] = []
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
with open(UpperCamelCase , "rb" ) as f:
return f.read(UpperCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if not magic_number:
__lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
__lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( UpperCamelCase__ ):
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool:
return tarfile.is_tarfile(UpperCamelCase )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
def resolved(UpperCamelCase ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase ) )
def badpath(UpperCamelCase , UpperCamelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase )
def badlink(UpperCamelCase , UpperCamelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
__lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase )
__lowerCAmelCase = resolved(UpperCamelCase )
for finfo in members:
if badpath(finfo.name , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = tarfile.open(UpperCamelCase )
tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) )
tar_file.close()
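# Path-traversal guard sketch (paths are illustrative): for an archive member named
# "../../etc/passwd" extracted under "/cache/x", badpath resolves the joined path to
# "/etc/passwd", which no longer starts with the base directory, so safemembers logs
# the member and skips it instead of letting tarfile write outside the target root.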
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x1F\x8B"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with gzip.open(UpperCamelCase , "rb" ) as gzip_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : List[Any] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool:
if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase , "rb" ) as fp:
__lowerCAmelCase = _EndRecData(UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be
if len(UpperCamelCase ) == sizeCentralDir:
__lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file:
zip_file.extractall(UpperCamelCase )
zip_file.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
with lzma.open(UpperCamelCase ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__lowerCAmelCase = rarfile.RarFile(UpperCamelCase )
rf.extractall(UpperCamelCase )
rf.close()
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : int = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
__lowerCAmelCase = zstd.ZstdDecompressor()
with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh:
dctx.copy_stream(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
        with bz2.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
        import py7zr
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
        with py7zr.SevenZipFile(UpperCamelCase , "r" ) as archive:
archive.extractall(UpperCamelCase )
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Any = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
        import lz4.frame
        with lz4.frame.open(UpperCamelCase , "rb" ) as compressed_file:
with open(UpperCamelCase , "wb" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase , UpperCamelCase )
class UpperCAmelCase__ :
    # Put the zip extractor last, because a file can be wrongly detected as zip when it is actually tar or gzip.
a : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCAmelCase_ ( cls ) -> Optional[Any]:
return max(
len(UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase , UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict:
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase )
except OSError:
return b""
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/>
__lowerCAmelCase = cls._get_magic_number_max_length()
__lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ):
return extractor_format
@classmethod
def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase )
# Prevent parallel extractions
__lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) )
with FileLock(UpperCamelCase ):
shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=UpperCamelCase , )
__lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format
else:
__lowerCAmelCase = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase , UpperCamelCase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase ):
return extractor.extract(UpperCamelCase , UpperCamelCase )
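# Usage sketch (paths are hypothetical): format inference reads the file's magic
# number rather than its extension.
#   fmt = Extractor.infer_extractor_format("downloads/archive.bin")  # e.g. "gzip"
#   Extractor.extract("downloads/archive.bin", "cache/extracted", extractor_format=fmt)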
'''simple docstring'''
def __lowerCAmelCase ( lowerCamelCase : str = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
__lowerCAmelCase = set()
# Replace all the whitespace in our sentence
__lowerCAmelCase = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowerCamelCase ) == 26
def __lowerCAmelCase ( lowerCamelCase : str = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
__lowerCAmelCase = [False] * 26
for char in input_str:
if char.islower():
__lowerCAmelCase = True
elif char.isupper():
__lowerCAmelCase = True
return all(lowerCamelCase )
def __lowerCAmelCase ( lowerCamelCase : str = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
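# All three variants agree (results shown are for illustration):
#   is_pangram()                               # -> True, default sentence covers a-z
#   is_pangram_fastest("Not quite a pangram")  # -> False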
def __lowerCAmelCase ( ):
'''simple docstring'''
from timeit import timeit
__lowerCAmelCase = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowerCamelCase ) )
print(timeit("is_pangram_faster()" , setup=lowerCamelCase ) )
print(timeit("is_pangram_fastest()" , setup=lowerCamelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self ) -> List[str]:
# test for the above condition
self.test()
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = 0
__lowerCAmelCase = False
while not completed:
if counter == 1:
self.reset()
__lowerCAmelCase = self.advance()
if not self.does_advance(UpperCamelCase ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(UpperCamelCase )
counter += 1
if counter > 1_0000:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def UpperCAmelCase_ ( self ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> str:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self , UpperCamelCase ) -> Dict:
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
__lowerCAmelCase = token_ids
__lowerCAmelCase = len(self.token_ids )
__lowerCAmelCase = -1 # the index of the currently fulfilled step
__lowerCAmelCase = False
def UpperCAmelCase_ ( self ) -> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(UpperCamelCase ):
self.fulfilled_idx += 1
__lowerCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
__lowerCAmelCase = True
__lowerCAmelCase = completed
else:
# failed to make progress.
__lowerCAmelCase = True
self.reset()
return stepped, completed, reset
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = False
__lowerCAmelCase = 0
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Optional[Any]:
__lowerCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.fulfilled_idx
__lowerCAmelCase = self.completed
return new_constraint
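# Stepping sketch (token ids are arbitrary): for PhrasalConstraint([5, 12, 9]),
# advance() first proposes 5; update(5) returns (stepped=True, completed=False,
# reset=False); after update(12) and update(9) the constraint reports
# completed=True, while any non-matching token id instead triggers a reset back
# to the start of the phrase.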
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase=True ) -> Optional[int]:
__lowerCAmelCase = max([len(UpperCamelCase ) for one in nested_token_ids] )
__lowerCAmelCase = {}
for token_ids in nested_token_ids:
__lowerCAmelCase = root
for tidx, token_id in enumerate(UpperCamelCase ):
if token_id not in level:
__lowerCAmelCase = {}
__lowerCAmelCase = level[token_id]
if no_subsets and self.has_subsets(UpperCamelCase , UpperCamelCase ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
F''' {nested_token_ids}.''' )
__lowerCAmelCase = root
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
__lowerCAmelCase = self.trie
for current_token in current_seq:
__lowerCAmelCase = start[current_token]
__lowerCAmelCase = list(start.keys() )
return next_tokens
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
__lowerCAmelCase = self.next_tokens(UpperCamelCase )
return len(UpperCamelCase ) == 0
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = list(root.values() )
if len(UpperCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCamelCase ) for nn in next_nodes] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = self.count_leaves(UpperCamelCase )
return len(UpperCamelCase ) != leaf_count
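# Trie sketch (token ids are arbitrary): built from [[1, 2, 3], [1, 2, 4]] the trie
# shares the prefix [1, 2]; next_tokens([1, 2]) returns [3, 4] and
# reached_leaf([1, 2, 3]) is True, which is how the disjunctive constraint below
# tracks which alternative phrases are still live.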
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self , UpperCamelCase ) -> List[Any]:
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCamelCase , UpperCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
__lowerCAmelCase = DisjunctiveTrie(UpperCamelCase )
__lowerCAmelCase = nested_token_ids
__lowerCAmelCase = self.trie.max_height
__lowerCAmelCase = []
__lowerCAmelCase = False
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(UpperCamelCase ):
self.current_seq.append(UpperCamelCase )
__lowerCAmelCase = True
else:
__lowerCAmelCase = True
self.reset()
__lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
__lowerCAmelCase = completed
return stepped, completed, reset
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = False
__lowerCAmelCase = []
def UpperCAmelCase_ ( self ) -> int:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Union[str, Any]:
__lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.current_seq
__lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase ) -> Union[str, Any]:
__lowerCAmelCase = constraints
# max # of steps required to fulfill a given constraint
__lowerCAmelCase = max([c.seqlen for c in constraints] )
__lowerCAmelCase = len(UpperCamelCase )
__lowerCAmelCase = False
self.init_state()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = []
__lowerCAmelCase = None
__lowerCAmelCase = [constraint.copy(stateful=UpperCamelCase ) for constraint in self.constraints]
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__lowerCAmelCase = constraint.advance()
if isinstance(UpperCamelCase , UpperCamelCase ):
token_list.append(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
token_list.extend(UpperCamelCase )
else:
__lowerCAmelCase = self.inprogress_constraint.advance()
if isinstance(UpperCamelCase , UpperCamelCase ):
token_list.append(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
token_list.extend(UpperCamelCase )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__lowerCAmelCase , __lowerCAmelCase = self.add(UpperCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Dict:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
__lowerCAmelCase , __lowerCAmelCase = False, False
if self.completed:
__lowerCAmelCase = True
__lowerCAmelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.inprogress_constraint.update(UpperCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase ) )
__lowerCAmelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__lowerCAmelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
__lowerCAmelCase = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCamelCase ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = pending_constraint.update(UpperCamelCase )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(UpperCamelCase )
__lowerCAmelCase = None
if not complete and stepped:
__lowerCAmelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__lowerCAmelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__lowerCAmelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCAmelCase_ ( self , UpperCamelCase=True ) -> str:
        __lowerCAmelCase = ConstraintListState(self.constraints ) # we never actually touch the self.constraints objects
        # throughout this process, so the copy starts from the initialization state.
if stateful:
__lowerCAmelCase = [
constraint.copy(stateful=UpperCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCamelCase )
__lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
    a : List[Any] = KandinskyImg2ImgPipeline
a : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
a : List[Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
a : Any = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a : Union[str, Any] = False
@property
def UpperCAmelCase_ ( self ) -> int:
return 32
@property
def UpperCAmelCase_ ( self ) -> List[str]:
return 32
@property
def UpperCAmelCase_ ( self ) -> Dict:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self ) -> int:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ) -> int:
return 100
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
__lowerCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__lowerCAmelCase = MultilingualCLIP(UpperCamelCase )
__lowerCAmelCase = text_encoder.eval()
return text_encoder
@property
def UpperCAmelCase_ ( self ) -> List[str]:
torch.manual_seed(0 )
__lowerCAmelCase = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        __lowerCAmelCase = UNet2DConditionModel(**UpperCamelCase )
return model
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
__lowerCAmelCase = DDIMScheduler(**UpperCamelCase )
__lowerCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> Optional[Any]:
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowerCAmelCase = Image.fromarray(np.uint8(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) )
if str(UpperCamelCase ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(UpperCamelCase )
else:
__lowerCAmelCase = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
__lowerCAmelCase = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**UpperCamelCase )
__lowerCAmelCase = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__lowerCAmelCase = "A red cartoon frog, 4k"
__lowerCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase )
        __lowerCAmelCase = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.float16 )
__lowerCAmelCase = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__lowerCAmelCase = pipeline(
UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
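# A minimal end-to-end sketch of the two pipelines exercised above (not part of the
# original test file; it reuses the same public checkpoints and prompt as the
# integration test, with an arbitrary strength value):
#
#     pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     image_embeds, negative_image_embeds = pipe_prior("A red cartoon frog, 4k").to_tuple()
#     pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#     frog = pipe(
#         "A red cartoon frog, 4k",
#         image=init_image,  # any PIL.Image, e.g. loaded with diffusers.utils.load_image
#         image_embeds=image_embeds,
#         negative_image_embeds=negative_image_embeds,
#         strength=0.3,
#     ).images[0]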
"""Tokenization class for mBART-50, based on SentencePiece."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
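# A minimal usage sketch of the tokenizer above (not part of the original module;
# the checkpoint name is the public mBART-50 model, and the layout comment follows
# set_src_lang_special_tokens / build_inputs_with_special_tokens):
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("Hello world", return_tensors="pt")
#     # batch["input_ids"] is laid out as: [en_XX] <subword ids...> </s>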
"""Fine-tuning the library models for multiple choice (e.g. on the SWAG dataset)."""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that will dynamically pad the inputs for multiple choice."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
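# Illustration of the collator's flatten/pad/un-flatten flow (hypothetical shapes,
# not part of the original script): with 2 examples of 4 choices each, the 2x4
# nested features are flattened into 8 sequences, padded in a single tokenizer.pad
# call, then viewed back into per-example tensors.
#
#     features = [{"input_ids": <4 token lists>, ..., "label": 2}, ...]  # 2 examples
#     batch = DataCollatorForMultipleChoice(tokenizer=tokenizer)(features)
#     batch["input_ids"].shape  # torch.Size([2, 4, max_len])
#     batch["labels"]           # tensor of shape (2,)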
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir, revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config,
        cache_dir=model_args.cache_dir, revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
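    # For instance (hypothetical SWAG-style row, not part of the original script):
    # sent1="A person plays piano.", sent2="They", ending0..3="smile.", "leave.", ...
    # becomes four (context, "They <ending>") pairs that are tokenized together and
    # then regrouped into lists of 4 per example by the dict comprehension above.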
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric: accuracy over the argmax of the per-choice scores
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
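# Example invocation (hypothetical paths and hyperparameters; any checkpoint usable
# with AutoModelForMultipleChoice works the same way):
#
#     python run_swag.py \
#         --model_name_or_path bert-base-uncased \
#         --do_train --do_eval \
#         --output_dir /tmp/swag_output \
#         --per_device_train_batch_size 16 \
#         --learning_rate 5e-5 \
#         --num_train_epochs 3 \
#         --overwrite_output_dir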